/*	$NetBSD: if_wm.c,v 1.655 2019/12/11 10:28:19 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.655 2019/12/11 10:28:19 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */
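
/*
 * The second argument to DPRINTF() is a complete parenthesized
 * printf() argument list, so that the "printf y" in the macro above
 * expands to a valid call, e.g.:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n",
 *	    device_xname(sc->sc_dev)));
 */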

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
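
/*
 * For reference, WM_Q_EVCNT_DEFINE(txq, txdw) expands to:
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * The string literal inside sizeof() is not subject to token pasting,
 * so every name buffer has the same fixed size, large enough for the
 * names (e.g. "txq00fifo_stall") that WM_Q_EVCNT_ATTACH() later
 * snprintf()s into it.
 */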

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This queue mediates between them without
	 * blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

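/*
 * Illustrative sketch (a hypothetical helper, not used by the driver):
 * how the power-of-two ring macros and the txq_free/txq_next fields
 * above are meant to compose when reserving "ndesc" descriptors for a
 * packet.
 */
static inline bool
wm_example_tx_reserve(struct wm_txqueue *txq, int ndesc)
{

	if (txq->txq_free < ndesc)
		return false;	/* no room; caller would set WM_TXQ_NO_SPACE */
	txq->txq_free -= ndesc;
	/* Advance txq_next by ndesc, wrapping with the ring mask. */
	txq->txq_next = (txq->txq_next + ndesc) & WM_NTXDESC_MASK(txq);
	return true;
}
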
struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
	krndsource_t rnd_source;	/* random source */
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only;
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

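/*
 * Illustrative sketch (a hypothetical helper, not part of the driver):
 * the intended acquire/access/release pattern for the wm_phyop and
 * wm_nvmop vectors above.
 */
static inline int
wm_example_nvm_read_word(struct wm_softc *sc, int word, uint16_t *datap)
{
	int rv;

	if ((rv = sc->nvm.acquire(sc)) != 0)
		return rv;
	rv = sc->nvm.read(sc, word, 1, datap);
	sc->nvm.release(sc);

	return rv;
}
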
#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

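/*
 * Illustrative sketch (hypothetical, not part of the driver): the
 * chain macros above accumulate the buffers of a packet that spans
 * several Rx descriptors; WM_RXCHAIN_RESET() then starts the next
 * chain.
 */
static inline void
wm_example_rxchain_append(struct wm_rxqueue *rxq, struct mbuf *m, int len)
{

	WM_RXCHAIN_LINK(rxq, m);	/* append this buffer's mbuf */
	rxq->rxq_len += len;		/* assumed: running byte count */
}
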
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

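/*
 * Illustrative sketch (a hypothetical helper): the _LO/_HI macros
 * above split a descriptor ring's DMA address into the two 32-bit
 * halves that the chip's 64-bit base-address register pairs take.
 */
static inline void
wm_example_split_cdtxaddr(struct wm_txqueue *txq, uint32_t *lo, uint32_t *hi)
{

	*lo = WM_CDTXADDR_LO(txq, 0);	/* low 32 bits of the ring base */
	*hi = WM_CDTXADDR_HI(txq, 0);	/* high 32 bits; 0 if bus_addr_t is 32-bit */
}
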
/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers, not MII
 * registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1324 	  "82567LM-3 LAN Controller",
   1325 	  WM_T_ICH10,		WMP_F_COPPER },
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1327 	  "82567LF-3 LAN Controller",
   1328 	  WM_T_ICH10,		WMP_F_COPPER },
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1330 	  "82567V-2 LAN Controller",
   1331 	  WM_T_ICH10,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1333 	  "82567V-3? LAN Controller",
   1334 	  WM_T_ICH10,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1336 	  "HANKSVILLE LAN Controller",
   1337 	  WM_T_ICH10,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1339 	  "PCH LAN (82577LM) Controller",
   1340 	  WM_T_PCH,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1342 	  "PCH LAN (82577LC) Controller",
   1343 	  WM_T_PCH,		WMP_F_COPPER },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1345 	  "PCH LAN (82578DM) Controller",
   1346 	  WM_T_PCH,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1348 	  "PCH LAN (82578DC) Controller",
   1349 	  WM_T_PCH,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1351 	  "PCH2 LAN (82579LM) Controller",
   1352 	  WM_T_PCH2,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1354 	  "PCH2 LAN (82579V) Controller",
   1355 	  WM_T_PCH2,		WMP_F_COPPER },
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1357 	  "82575EB dual-1000baseT Ethernet",
   1358 	  WM_T_82575,		WMP_F_COPPER },
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1360 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1361 	  WM_T_82575,		WMP_F_SERDES },
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1363 	  "82575GB quad-1000baseT Ethernet",
   1364 	  WM_T_82575,		WMP_F_COPPER },
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1366 	  "82575GB quad-1000baseT Ethernet (PM)",
   1367 	  WM_T_82575,		WMP_F_COPPER },
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1369 	  "82576 1000BaseT Ethernet",
   1370 	  WM_T_82576,		WMP_F_COPPER },
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1372 	  "82576 1000BaseX Ethernet",
   1373 	  WM_T_82576,		WMP_F_FIBER },
   1374 
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1376 	  "82576 gigabit Ethernet (SERDES)",
   1377 	  WM_T_82576,		WMP_F_SERDES },
   1378 
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1380 	  "82576 quad-1000BaseT Ethernet",
   1381 	  WM_T_82576,		WMP_F_COPPER },
   1382 
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1384 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1385 	  WM_T_82576,		WMP_F_COPPER },
   1386 
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1388 	  "82576 gigabit Ethernet",
   1389 	  WM_T_82576,		WMP_F_COPPER },
   1390 
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1392 	  "82576 gigabit Ethernet (SERDES)",
   1393 	  WM_T_82576,		WMP_F_SERDES },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1395 	  "82576 quad-gigabit Ethernet (SERDES)",
   1396 	  WM_T_82576,		WMP_F_SERDES },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1399 	  "82580 1000BaseT Ethernet",
   1400 	  WM_T_82580,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1402 	  "82580 1000BaseX Ethernet",
   1403 	  WM_T_82580,		WMP_F_FIBER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1406 	  "82580 1000BaseT Ethernet (SERDES)",
   1407 	  WM_T_82580,		WMP_F_SERDES },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1410 	  "82580 gigabit Ethernet (SGMII)",
   1411 	  WM_T_82580,		WMP_F_COPPER },
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1413 	  "82580 dual-1000BaseT Ethernet",
   1414 	  WM_T_82580,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1417 	  "82580 quad-1000BaseX Ethernet",
   1418 	  WM_T_82580,		WMP_F_FIBER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1421 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1422 	  WM_T_82580,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1425 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1426 	  WM_T_82580,		WMP_F_SERDES },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1429 	  "DH89XXCC 1000BASE-KX Ethernet",
   1430 	  WM_T_82580,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1433 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1434 	  WM_T_82580,		WMP_F_SERDES },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1437 	  "I350 Gigabit Network Connection",
   1438 	  WM_T_I350,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1441 	  "I350 Gigabit Fiber Network Connection",
   1442 	  WM_T_I350,		WMP_F_FIBER },
   1443 
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1445 	  "I350 Gigabit Backplane Connection",
   1446 	  WM_T_I350,		WMP_F_SERDES },
   1447 
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1449 	  "I350 Quad Port Gigabit Ethernet",
   1450 	  WM_T_I350,		WMP_F_SERDES },
   1451 
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1453 	  "I350 Gigabit Connection",
   1454 	  WM_T_I350,		WMP_F_COPPER },
   1455 
   1456 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1457 	  "I354 Gigabit Ethernet (KX)",
   1458 	  WM_T_I354,		WMP_F_SERDES },
   1459 
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1461 	  "I354 Gigabit Ethernet (SGMII)",
   1462 	  WM_T_I354,		WMP_F_COPPER },
   1463 
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1465 	  "I354 Gigabit Ethernet (2.5G)",
   1466 	  WM_T_I354,		WMP_F_COPPER },
   1467 
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1469 	  "I210-T1 Ethernet Server Adapter",
   1470 	  WM_T_I210,		WMP_F_COPPER },
   1471 
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1473 	  "I210 Ethernet (Copper OEM)",
   1474 	  WM_T_I210,		WMP_F_COPPER },
   1475 
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1477 	  "I210 Ethernet (Copper IT)",
   1478 	  WM_T_I210,		WMP_F_COPPER },
   1479 
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1481 	  "I210 Ethernet (Copper, FLASH less)",
   1482 	  WM_T_I210,		WMP_F_COPPER },
   1483 
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1485 	  "I210 Gigabit Ethernet (Fiber)",
   1486 	  WM_T_I210,		WMP_F_FIBER },
   1487 
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1489 	  "I210 Gigabit Ethernet (SERDES)",
   1490 	  WM_T_I210,		WMP_F_SERDES },
   1491 
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1493 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1494 	  WM_T_I210,		WMP_F_SERDES },
   1495 
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1497 	  "I210 Gigabit Ethernet (SGMII)",
   1498 	  WM_T_I210,		WMP_F_COPPER },
   1499 
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1501 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1502 	  WM_T_I210,		WMP_F_COPPER },
   1503 
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1505 	  "I211 Ethernet (COPPER)",
   1506 	  WM_T_I211,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1508 	  "I217 V Ethernet Connection",
   1509 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1511 	  "I217 LM Ethernet Connection",
   1512 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1514 	  "I218 V Ethernet Connection",
   1515 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1517 	  "I218 V Ethernet Connection",
   1518 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1520 	  "I218 V Ethernet Connection",
   1521 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1523 	  "I218 LM Ethernet Connection",
   1524 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1526 	  "I218 LM Ethernet Connection",
   1527 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1529 	  "I218 LM Ethernet Connection",
   1530 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1532 	  "I219 LM Ethernet Connection",
   1533 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1535 	  "I219 LM Ethernet Connection",
   1536 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1538 	  "I219 LM Ethernet Connection",
   1539 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1544 	  "I219 LM Ethernet Connection",
   1545 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1547 	  "I219 LM Ethernet Connection",
   1548 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1550 	  "I219 LM Ethernet Connection",
   1551 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1553 	  "I219 LM Ethernet Connection",
   1554 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1556 	  "I219 LM Ethernet Connection",
   1557 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1559 	  "I219 V Ethernet Connection",
   1560 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1562 	  "I219 V Ethernet Connection",
   1563 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1564 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1565 	  "I219 V Ethernet Connection",
   1566 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1567 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1568 	  "I219 V Ethernet Connection",
   1569 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1570 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1571 	  "I219 V Ethernet Connection",
   1572 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1573 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1574 	  "I219 V Ethernet Connection",
   1575 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1576 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1577 	  "I219 V Ethernet Connection",
   1578 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1579 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1580 	  "I219 V Ethernet Connection",
   1581 	  WM_T_PCH_CNP,		WMP_F_COPPER },
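         	/* Sentinel entry: wm_lookup() stops at the NULL name. */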
   1582 	{ 0,			0,
   1583 	  NULL,
   1584 	  0,			0 },
   1585 };
   1586 
   1587 /*
    1588  * Register read/write functions,
    1589  * other than CSR_{READ|WRITE}().
   1590  */
   1591 
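         /*
          * Indirect register access through the I/O BAR: write the target
          * register offset to the address window at offset 0, then access
          * the value through the data window at offset 4.
          */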
   1592 #if 0 /* Not currently used */
   1593 static inline uint32_t
   1594 wm_io_read(struct wm_softc *sc, int reg)
   1595 {
   1596 
   1597 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1598 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1599 }
   1600 #endif
   1601 
   1602 static inline void
   1603 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1604 {
   1605 
   1606 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1607 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1608 }
   1609 
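         /*
          * Write an 8-bit value into an indirect controller register (e.g.
          * SCTL): merge the data and the register offset into a single
          * 32-bit write, then poll until the hardware sets the READY bit
          * or the timeout expires.
          */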
   1610 static inline void
   1611 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1612     uint32_t data)
   1613 {
   1614 	uint32_t regval;
   1615 	int i;
   1616 
   1617 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1618 
   1619 	CSR_WRITE(sc, reg, regval);
   1620 
   1621 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1622 		delay(5);
   1623 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1624 			break;
   1625 	}
   1626 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1627 		aprint_error("%s: WARNING:"
   1628 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1629 		    device_xname(sc->sc_dev), reg);
   1630 	}
   1631 }
   1632 
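         /*
          * Store a DMA address into a descriptor's low/high address words
          * in little-endian order; the high word is zero when bus addresses
          * are 32-bit.
          */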
   1633 static inline void
   1634 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1635 {
   1636 	wa->wa_low = htole32(v & 0xffffffffU);
   1637 	if (sizeof(bus_addr_t) == 8)
   1638 		wa->wa_high = htole32((uint64_t) v >> 32);
   1639 	else
   1640 		wa->wa_high = 0;
   1641 }
   1642 
   1643 /*
   1644  * Descriptor sync/init functions.
   1645  */
   1646 static inline void
   1647 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1648 {
   1649 	struct wm_softc *sc = txq->txq_sc;
   1650 
   1651 	/* If it will wrap around, sync to the end of the ring. */
   1652 	if ((start + num) > WM_NTXDESC(txq)) {
   1653 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1654 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1655 		    (WM_NTXDESC(txq) - start), ops);
   1656 		num -= (WM_NTXDESC(txq) - start);
   1657 		start = 0;
   1658 	}
   1659 
   1660 	/* Now sync whatever is left. */
   1661 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1662 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1663 }
   1664 
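         /*
          * Sync a single Rx descriptor's control data; unlike Tx, Rx
          * descriptors are reinitialized one at a time.
          */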
   1665 static inline void
   1666 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1667 {
   1668 	struct wm_softc *sc = rxq->rxq_sc;
   1669 
   1670 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1671 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1672 }
   1673 
   1674 static inline void
   1675 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1676 {
   1677 	struct wm_softc *sc = rxq->rxq_sc;
   1678 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1679 	struct mbuf *m = rxs->rxs_mbuf;
   1680 
   1681 	/*
   1682 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1683 	 * so that the payload after the Ethernet header is aligned
   1684 	 * to a 4-byte boundary.
    1685 	 *
   1686 	 * XXX BRAINDAMAGE ALERT!
   1687 	 * The stupid chip uses the same size for every buffer, which
   1688 	 * is set in the Receive Control register.  We are using the 2K
   1689 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1690 	 * reason, we can't "scoot" packets longer than the standard
   1691 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1692 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1693 	 * the upper layer copy the headers.
   1694 	 */
   1695 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1696 
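         	/*
         	 * Three Rx descriptor layouts are supported: the 82574 uses
         	 * the extended format, NEWQUEUE (82575 and later) devices use
         	 * the "nq" format, and all others use the legacy wiseman
         	 * format.
         	 */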
   1697 	if (sc->sc_type == WM_T_82574) {
   1698 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
    1699 		rxd->erx_data.erxd_addr = htole64(
    1700 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1701 		rxd->erx_data.erxd_dd = 0;
   1702 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1703 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1704 
    1705 		rxd->nqrx_data.nrxd_paddr = htole64(
    1706 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1707 		/* Currently, split header is not supported. */
   1708 		rxd->nqrx_data.nrxd_haddr = 0;
   1709 	} else {
   1710 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1711 
   1712 		wm_set_dma_addr(&rxd->wrx_addr,
   1713 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1714 		rxd->wrx_len = 0;
   1715 		rxd->wrx_cksum = 0;
   1716 		rxd->wrx_status = 0;
   1717 		rxd->wrx_errors = 0;
   1718 		rxd->wrx_special = 0;
   1719 	}
   1720 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1721 
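         	/* Advancing the tail pointer hands this descriptor to the chip */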
   1722 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1723 }
   1724 
   1725 /*
   1726  * Device driver interface functions and commonly used functions.
   1727  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1728  */
   1729 
   1730 /* Lookup supported device table */
   1731 static const struct wm_product *
   1732 wm_lookup(const struct pci_attach_args *pa)
   1733 {
   1734 	const struct wm_product *wmp;
   1735 
   1736 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1737 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1738 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1739 			return wmp;
   1740 	}
   1741 	return NULL;
   1742 }
   1743 
   1744 /* The match function (ca_match) */
   1745 static int
   1746 wm_match(device_t parent, cfdata_t cf, void *aux)
   1747 {
   1748 	struct pci_attach_args *pa = aux;
   1749 
   1750 	if (wm_lookup(pa) != NULL)
   1751 		return 1;
   1752 
   1753 	return 0;
   1754 }
   1755 
   1756 /* The attach function (ca_attach) */
   1757 static void
   1758 wm_attach(device_t parent, device_t self, void *aux)
   1759 {
   1760 	struct wm_softc *sc = device_private(self);
   1761 	struct pci_attach_args *pa = aux;
   1762 	prop_dictionary_t dict;
   1763 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1764 	pci_chipset_tag_t pc = pa->pa_pc;
   1765 	int counts[PCI_INTR_TYPE_SIZE];
   1766 	pci_intr_type_t max_type;
   1767 	const char *eetype, *xname;
   1768 	bus_space_tag_t memt;
   1769 	bus_space_handle_t memh;
   1770 	bus_size_t memsize;
   1771 	int memh_valid;
   1772 	int i, error;
   1773 	const struct wm_product *wmp;
   1774 	prop_data_t ea;
   1775 	prop_number_t pn;
   1776 	uint8_t enaddr[ETHER_ADDR_LEN];
   1777 	char buf[256];
   1778 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1779 	pcireg_t preg, memtype;
   1780 	uint16_t eeprom_data, apme_mask;
   1781 	bool force_clear_smbi;
   1782 	uint32_t link_mode;
   1783 	uint32_t reg;
   1784 
   1785 	sc->sc_dev = self;
   1786 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1787 	sc->sc_core_stopping = false;
   1788 
   1789 	wmp = wm_lookup(pa);
   1790 #ifdef DIAGNOSTIC
   1791 	if (wmp == NULL) {
   1792 		printf("\n");
   1793 		panic("wm_attach: impossible");
   1794 	}
   1795 #endif
   1796 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1797 
   1798 	sc->sc_pc = pa->pa_pc;
   1799 	sc->sc_pcitag = pa->pa_tag;
   1800 
   1801 	if (pci_dma64_available(pa))
   1802 		sc->sc_dmat = pa->pa_dmat64;
   1803 	else
   1804 		sc->sc_dmat = pa->pa_dmat;
   1805 
   1806 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1807 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1808 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1809 
   1810 	sc->sc_type = wmp->wmp_type;
   1811 
   1812 	/* Set default function pointers */
   1813 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1814 	sc->phy.release = sc->nvm.release = wm_put_null;
   1815 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1816 
   1817 	if (sc->sc_type < WM_T_82543) {
   1818 		if (sc->sc_rev < 2) {
   1819 			aprint_error_dev(sc->sc_dev,
   1820 			    "i82542 must be at least rev. 2\n");
   1821 			return;
   1822 		}
   1823 		if (sc->sc_rev < 3)
   1824 			sc->sc_type = WM_T_82542_2_0;
   1825 	}
   1826 
   1827 	/*
   1828 	 * Disable MSI for Errata:
   1829 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1830 	 *
   1831 	 *  82544: Errata 25
   1832 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1833 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1834 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1835 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1836 	 *
   1837 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1838 	 *
   1839 	 *  82571 & 82572: Errata 63
   1840 	 */
   1841 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1842 	    || (sc->sc_type == WM_T_82572))
   1843 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1844 
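         	/*
         	 * These devices use the new-style queue registers and the
         	 * "nq" descriptor format (see wm_init_rxdesc()).
         	 */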
   1845 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1846 	    || (sc->sc_type == WM_T_82580)
   1847 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1848 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1849 		sc->sc_flags |= WM_F_NEWQUEUE;
   1850 
   1851 	/* Set device properties (mactype) */
   1852 	dict = device_properties(sc->sc_dev);
   1853 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1854 
   1855 	/*
    1856 	 * Map the device.  All devices support memory-mapped access,
   1857 	 * and it is really required for normal operation.
   1858 	 */
   1859 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1860 	switch (memtype) {
   1861 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1862 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1863 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1864 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1865 		break;
   1866 	default:
   1867 		memh_valid = 0;
   1868 		break;
   1869 	}
   1870 
   1871 	if (memh_valid) {
   1872 		sc->sc_st = memt;
   1873 		sc->sc_sh = memh;
   1874 		sc->sc_ss = memsize;
   1875 	} else {
   1876 		aprint_error_dev(sc->sc_dev,
   1877 		    "unable to map device registers\n");
   1878 		return;
   1879 	}
   1880 
   1881 	/*
   1882 	 * In addition, i82544 and later support I/O mapped indirect
   1883 	 * register access.  It is not desirable (nor supported in
   1884 	 * this driver) to use it for normal operation, though it is
   1885 	 * required to work around bugs in some chip versions.
   1886 	 */
   1887 	if (sc->sc_type >= WM_T_82544) {
   1888 		/* First we have to find the I/O BAR. */
   1889 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1890 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1891 			if (memtype == PCI_MAPREG_TYPE_IO)
   1892 				break;
   1893 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1894 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1895 				i += 4;	/* skip high bits, too */
   1896 		}
   1897 		if (i < PCI_MAPREG_END) {
   1898 			/*
    1899 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1900 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1901 			 * That's not a problem because newer chips don't
    1902 			 * have this bug.
    1903 			 *
    1904 			 * The i8254x apparently doesn't respond when the
    1905 			 * I/O BAR is 0, which looks somewhat like it
    1906 			 * hasn't been configured.
   1907 			 */
   1908 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1909 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1910 				aprint_error_dev(sc->sc_dev,
   1911 				    "WARNING: I/O BAR at zero.\n");
   1912 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1913 					0, &sc->sc_iot, &sc->sc_ioh,
   1914 					NULL, &sc->sc_ios) == 0) {
   1915 				sc->sc_flags |= WM_F_IOH_VALID;
   1916 			} else
   1917 				aprint_error_dev(sc->sc_dev,
   1918 				    "WARNING: unable to map I/O space\n");
   1919 		}
   1920 
   1921 	}
   1922 
   1923 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1924 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1925 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1926 	if (sc->sc_type < WM_T_82542_2_1)
   1927 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1928 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1929 
   1930 	/* Power up chip */
   1931 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1932 	    && error != EOPNOTSUPP) {
   1933 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1934 		return;
   1935 	}
   1936 
   1937 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1938 	/*
    1939 	 *  To save interrupt resources, don't use MSI-X if we can use
    1940 	 * only one queue.
   1941 	 */
   1942 	if (sc->sc_nqueues > 1) {
   1943 		max_type = PCI_INTR_TYPE_MSIX;
   1944 		/*
    1945 		 *  The 82583 has an MSI-X capability in its PCI
    1946 		 * configuration space but doesn't actually support it. At
    1947 		 * least the documentation doesn't say anything about MSI-X.
   1948 		 */
   1949 		counts[PCI_INTR_TYPE_MSIX]
   1950 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1951 	} else {
   1952 		max_type = PCI_INTR_TYPE_MSI;
   1953 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1954 	}
   1955 
   1956 	/* Allocation settings */
   1957 	counts[PCI_INTR_TYPE_MSI] = 1;
   1958 	counts[PCI_INTR_TYPE_INTX] = 1;
   1959 	/* overridden by disable flags */
   1960 	if (wm_disable_msi != 0) {
   1961 		counts[PCI_INTR_TYPE_MSI] = 0;
   1962 		if (wm_disable_msix != 0) {
   1963 			max_type = PCI_INTR_TYPE_INTX;
   1964 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1965 		}
   1966 	} else if (wm_disable_msix != 0) {
   1967 		max_type = PCI_INTR_TYPE_MSI;
   1968 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1969 	}
   1970 
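         	/*
         	 * Interrupt allocation falls back one type at a time:
         	 * MSI-X to MSI, and MSI to INTx.
         	 */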
   1971 alloc_retry:
   1972 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1973 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1974 		return;
   1975 	}
   1976 
   1977 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1978 		error = wm_setup_msix(sc);
   1979 		if (error) {
   1980 			pci_intr_release(pc, sc->sc_intrs,
   1981 			    counts[PCI_INTR_TYPE_MSIX]);
   1982 
   1983 			/* Setup for MSI: Disable MSI-X */
   1984 			max_type = PCI_INTR_TYPE_MSI;
   1985 			counts[PCI_INTR_TYPE_MSI] = 1;
   1986 			counts[PCI_INTR_TYPE_INTX] = 1;
   1987 			goto alloc_retry;
   1988 		}
   1989 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1990 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   1991 		error = wm_setup_legacy(sc);
   1992 		if (error) {
   1993 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1994 			    counts[PCI_INTR_TYPE_MSI]);
   1995 
   1996 			/* The next try is for INTx: Disable MSI */
   1997 			max_type = PCI_INTR_TYPE_INTX;
   1998 			counts[PCI_INTR_TYPE_INTX] = 1;
   1999 			goto alloc_retry;
   2000 		}
   2001 	} else {
   2002 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2003 		error = wm_setup_legacy(sc);
   2004 		if (error) {
   2005 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2006 			    counts[PCI_INTR_TYPE_INTX]);
   2007 			return;
   2008 		}
   2009 	}
   2010 
   2011 	/*
   2012 	 * Check the function ID (unit number of the chip).
   2013 	 */
   2014 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2015 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2016 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2017 	    || (sc->sc_type == WM_T_82580)
   2018 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2019 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2020 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2021 	else
   2022 		sc->sc_funcid = 0;
   2023 
   2024 	/*
   2025 	 * Determine a few things about the bus we're connected to.
   2026 	 */
   2027 	if (sc->sc_type < WM_T_82543) {
   2028 		/* We don't really know the bus characteristics here. */
   2029 		sc->sc_bus_speed = 33;
   2030 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2031 		/*
    2032 		 * CSA (Communication Streaming Architecture) is about as
    2033 		 * fast as a 32-bit 66MHz PCI bus.
   2034 		 */
   2035 		sc->sc_flags |= WM_F_CSA;
   2036 		sc->sc_bus_speed = 66;
   2037 		aprint_verbose_dev(sc->sc_dev,
   2038 		    "Communication Streaming Architecture\n");
   2039 		if (sc->sc_type == WM_T_82547) {
   2040 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2041 			callout_setfunc(&sc->sc_txfifo_ch,
   2042 			    wm_82547_txfifo_stall, sc);
   2043 			aprint_verbose_dev(sc->sc_dev,
   2044 			    "using 82547 Tx FIFO stall work-around\n");
   2045 		}
   2046 	} else if (sc->sc_type >= WM_T_82571) {
   2047 		sc->sc_flags |= WM_F_PCIE;
   2048 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2049 		    && (sc->sc_type != WM_T_ICH10)
   2050 		    && (sc->sc_type != WM_T_PCH)
   2051 		    && (sc->sc_type != WM_T_PCH2)
   2052 		    && (sc->sc_type != WM_T_PCH_LPT)
   2053 		    && (sc->sc_type != WM_T_PCH_SPT)
   2054 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2055 			/* ICH* and PCH* have no PCIe capability registers */
   2056 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2057 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2058 				NULL) == 0)
   2059 				aprint_error_dev(sc->sc_dev,
   2060 				    "unable to find PCIe capability\n");
   2061 		}
   2062 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2063 	} else {
   2064 		reg = CSR_READ(sc, WMREG_STATUS);
   2065 		if (reg & STATUS_BUS64)
   2066 			sc->sc_flags |= WM_F_BUS64;
   2067 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2068 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2069 
   2070 			sc->sc_flags |= WM_F_PCIX;
   2071 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2072 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2073 				aprint_error_dev(sc->sc_dev,
   2074 				    "unable to find PCIX capability\n");
   2075 			else if (sc->sc_type != WM_T_82545_3 &&
   2076 				 sc->sc_type != WM_T_82546_3) {
   2077 				/*
   2078 				 * Work around a problem caused by the BIOS
   2079 				 * setting the max memory read byte count
   2080 				 * incorrectly.
   2081 				 */
   2082 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2083 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2084 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2085 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2086 
   2087 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2088 				    PCIX_CMD_BYTECNT_SHIFT;
   2089 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2090 				    PCIX_STATUS_MAXB_SHIFT;
   2091 				if (bytecnt > maxb) {
   2092 					aprint_verbose_dev(sc->sc_dev,
   2093 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2094 					    512 << bytecnt, 512 << maxb);
   2095 					pcix_cmd = (pcix_cmd &
   2096 					    ~PCIX_CMD_BYTECNT_MASK) |
   2097 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2098 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2099 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2100 					    pcix_cmd);
   2101 				}
   2102 			}
   2103 		}
   2104 		/*
   2105 		 * The quad port adapter is special; it has a PCIX-PCIX
   2106 		 * bridge on the board, and can run the secondary bus at
   2107 		 * a higher speed.
   2108 		 */
   2109 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2110 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2111 								      : 66;
   2112 		} else if (sc->sc_flags & WM_F_PCIX) {
   2113 			switch (reg & STATUS_PCIXSPD_MASK) {
   2114 			case STATUS_PCIXSPD_50_66:
   2115 				sc->sc_bus_speed = 66;
   2116 				break;
   2117 			case STATUS_PCIXSPD_66_100:
   2118 				sc->sc_bus_speed = 100;
   2119 				break;
   2120 			case STATUS_PCIXSPD_100_133:
   2121 				sc->sc_bus_speed = 133;
   2122 				break;
   2123 			default:
   2124 				aprint_error_dev(sc->sc_dev,
   2125 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2126 				    reg & STATUS_PCIXSPD_MASK);
   2127 				sc->sc_bus_speed = 66;
   2128 				break;
   2129 			}
   2130 		} else
   2131 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2132 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2133 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2134 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2135 	}
   2136 
   2137 	/* clear interesting stat counters */
   2138 	CSR_READ(sc, WMREG_COLC);
   2139 	CSR_READ(sc, WMREG_RXERRC);
   2140 
   2141 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2142 	    || (sc->sc_type >= WM_T_ICH8))
   2143 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2144 	if (sc->sc_type >= WM_T_ICH8)
   2145 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2146 
   2147 	/* Set PHY, NVM mutex related stuff */
   2148 	switch (sc->sc_type) {
   2149 	case WM_T_82542_2_0:
   2150 	case WM_T_82542_2_1:
   2151 	case WM_T_82543:
   2152 	case WM_T_82544:
   2153 		/* Microwire */
   2154 		sc->nvm.read = wm_nvm_read_uwire;
   2155 		sc->sc_nvm_wordsize = 64;
   2156 		sc->sc_nvm_addrbits = 6;
   2157 		break;
   2158 	case WM_T_82540:
   2159 	case WM_T_82545:
   2160 	case WM_T_82545_3:
   2161 	case WM_T_82546:
   2162 	case WM_T_82546_3:
   2163 		/* Microwire */
   2164 		sc->nvm.read = wm_nvm_read_uwire;
   2165 		reg = CSR_READ(sc, WMREG_EECD);
   2166 		if (reg & EECD_EE_SIZE) {
   2167 			sc->sc_nvm_wordsize = 256;
   2168 			sc->sc_nvm_addrbits = 8;
   2169 		} else {
   2170 			sc->sc_nvm_wordsize = 64;
   2171 			sc->sc_nvm_addrbits = 6;
   2172 		}
   2173 		sc->sc_flags |= WM_F_LOCK_EECD;
   2174 		sc->nvm.acquire = wm_get_eecd;
   2175 		sc->nvm.release = wm_put_eecd;
   2176 		break;
   2177 	case WM_T_82541:
   2178 	case WM_T_82541_2:
   2179 	case WM_T_82547:
   2180 	case WM_T_82547_2:
   2181 		reg = CSR_READ(sc, WMREG_EECD);
   2182 		/*
    2183 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2184 		 * 8254[17], so set the flags and functions before calling it.
   2185 		 */
   2186 		sc->sc_flags |= WM_F_LOCK_EECD;
   2187 		sc->nvm.acquire = wm_get_eecd;
   2188 		sc->nvm.release = wm_put_eecd;
   2189 		if (reg & EECD_EE_TYPE) {
   2190 			/* SPI */
   2191 			sc->nvm.read = wm_nvm_read_spi;
   2192 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2193 			wm_nvm_set_addrbits_size_eecd(sc);
   2194 		} else {
   2195 			/* Microwire */
   2196 			sc->nvm.read = wm_nvm_read_uwire;
   2197 			if ((reg & EECD_EE_ABITS) != 0) {
   2198 				sc->sc_nvm_wordsize = 256;
   2199 				sc->sc_nvm_addrbits = 8;
   2200 			} else {
   2201 				sc->sc_nvm_wordsize = 64;
   2202 				sc->sc_nvm_addrbits = 6;
   2203 			}
   2204 		}
   2205 		break;
   2206 	case WM_T_82571:
   2207 	case WM_T_82572:
   2208 		/* SPI */
   2209 		sc->nvm.read = wm_nvm_read_eerd;
    2210 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2211 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2212 		wm_nvm_set_addrbits_size_eecd(sc);
   2213 		sc->phy.acquire = wm_get_swsm_semaphore;
   2214 		sc->phy.release = wm_put_swsm_semaphore;
   2215 		sc->nvm.acquire = wm_get_nvm_82571;
   2216 		sc->nvm.release = wm_put_nvm_82571;
   2217 		break;
   2218 	case WM_T_82573:
   2219 	case WM_T_82574:
   2220 	case WM_T_82583:
   2221 		sc->nvm.read = wm_nvm_read_eerd;
    2222 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2223 		if (sc->sc_type == WM_T_82573) {
   2224 			sc->phy.acquire = wm_get_swsm_semaphore;
   2225 			sc->phy.release = wm_put_swsm_semaphore;
   2226 			sc->nvm.acquire = wm_get_nvm_82571;
   2227 			sc->nvm.release = wm_put_nvm_82571;
   2228 		} else {
   2229 			/* Both PHY and NVM use the same semaphore. */
   2230 			sc->phy.acquire = sc->nvm.acquire
   2231 			    = wm_get_swfwhw_semaphore;
   2232 			sc->phy.release = sc->nvm.release
   2233 			    = wm_put_swfwhw_semaphore;
   2234 		}
   2235 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2236 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2237 			sc->sc_nvm_wordsize = 2048;
   2238 		} else {
   2239 			/* SPI */
   2240 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2241 			wm_nvm_set_addrbits_size_eecd(sc);
   2242 		}
   2243 		break;
   2244 	case WM_T_82575:
   2245 	case WM_T_82576:
   2246 	case WM_T_82580:
   2247 	case WM_T_I350:
   2248 	case WM_T_I354:
   2249 	case WM_T_80003:
   2250 		/* SPI */
   2251 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2252 		wm_nvm_set_addrbits_size_eecd(sc);
   2253 		if ((sc->sc_type == WM_T_80003)
   2254 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2255 			sc->nvm.read = wm_nvm_read_eerd;
   2256 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2257 		} else {
   2258 			sc->nvm.read = wm_nvm_read_spi;
   2259 			sc->sc_flags |= WM_F_LOCK_EECD;
   2260 		}
   2261 		sc->phy.acquire = wm_get_phy_82575;
   2262 		sc->phy.release = wm_put_phy_82575;
   2263 		sc->nvm.acquire = wm_get_nvm_80003;
   2264 		sc->nvm.release = wm_put_nvm_80003;
   2265 		break;
   2266 	case WM_T_ICH8:
   2267 	case WM_T_ICH9:
   2268 	case WM_T_ICH10:
   2269 	case WM_T_PCH:
   2270 	case WM_T_PCH2:
   2271 	case WM_T_PCH_LPT:
   2272 		sc->nvm.read = wm_nvm_read_ich8;
   2273 		/* FLASH */
   2274 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2275 		sc->sc_nvm_wordsize = 2048;
   2276 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2277 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2278 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2279 			aprint_error_dev(sc->sc_dev,
   2280 			    "can't map FLASH registers\n");
   2281 			goto out;
   2282 		}
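         		/*
         		 * GFPREG holds the NVM region's first and last sectors.
         		 * Convert them into a byte offset for the flash base and
         		 * a per-bank size in 16-bit words (the region holds two
         		 * banks).
         		 */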
   2283 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2284 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2285 		    ICH_FLASH_SECTOR_SIZE;
   2286 		sc->sc_ich8_flash_bank_size =
   2287 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2288 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2289 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2290 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2291 		sc->sc_flashreg_offset = 0;
   2292 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2293 		sc->phy.release = wm_put_swflag_ich8lan;
   2294 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2295 		sc->nvm.release = wm_put_nvm_ich8lan;
   2296 		break;
   2297 	case WM_T_PCH_SPT:
   2298 	case WM_T_PCH_CNP:
   2299 		sc->nvm.read = wm_nvm_read_spt;
   2300 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2301 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2302 		sc->sc_flasht = sc->sc_st;
   2303 		sc->sc_flashh = sc->sc_sh;
   2304 		sc->sc_ich8_flash_base = 0;
   2305 		sc->sc_nvm_wordsize =
   2306 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2307 		    * NVM_SIZE_MULTIPLIER;
    2308 		/* It is the size in bytes; we want words */
   2309 		sc->sc_nvm_wordsize /= 2;
   2310 		/* Assume 2 banks */
   2311 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2312 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2313 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2314 		sc->phy.release = wm_put_swflag_ich8lan;
   2315 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2316 		sc->nvm.release = wm_put_nvm_ich8lan;
   2317 		break;
   2318 	case WM_T_I210:
   2319 	case WM_T_I211:
    2320 		/* Allow one clear of the SW semaphore on I210 and newer */
   2321 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2322 		if (wm_nvm_flash_presence_i210(sc)) {
   2323 			sc->nvm.read = wm_nvm_read_eerd;
   2324 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2325 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2326 			wm_nvm_set_addrbits_size_eecd(sc);
   2327 		} else {
   2328 			sc->nvm.read = wm_nvm_read_invm;
   2329 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2330 			sc->sc_nvm_wordsize = INVM_SIZE;
   2331 		}
   2332 		sc->phy.acquire = wm_get_phy_82575;
   2333 		sc->phy.release = wm_put_phy_82575;
   2334 		sc->nvm.acquire = wm_get_nvm_80003;
   2335 		sc->nvm.release = wm_put_nvm_80003;
   2336 		break;
   2337 	default:
   2338 		break;
   2339 	}
   2340 
   2341 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2342 	switch (sc->sc_type) {
   2343 	case WM_T_82571:
   2344 	case WM_T_82572:
   2345 		reg = CSR_READ(sc, WMREG_SWSM2);
   2346 		if ((reg & SWSM2_LOCK) == 0) {
   2347 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2348 			force_clear_smbi = true;
   2349 		} else
   2350 			force_clear_smbi = false;
   2351 		break;
   2352 	case WM_T_82573:
   2353 	case WM_T_82574:
   2354 	case WM_T_82583:
   2355 		force_clear_smbi = true;
   2356 		break;
   2357 	default:
   2358 		force_clear_smbi = false;
   2359 		break;
   2360 	}
   2361 	if (force_clear_smbi) {
   2362 		reg = CSR_READ(sc, WMREG_SWSM);
   2363 		if ((reg & SWSM_SMBI) != 0)
   2364 			aprint_error_dev(sc->sc_dev,
   2365 			    "Please update the Bootagent\n");
   2366 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2367 	}
   2368 
   2369 	/*
    2370 	 * Defer printing the EEPROM type until after verifying the checksum.
   2371 	 * This allows the EEPROM type to be printed correctly in the case
   2372 	 * that no EEPROM is attached.
   2373 	 */
   2374 	/*
   2375 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2376 	 * this for later, so we can fail future reads from the EEPROM.
   2377 	 */
   2378 	if (wm_nvm_validate_checksum(sc)) {
   2379 		/*
    2380 		 * Check the checksum again because some PCI-e parts fail
    2381 		 * the first check due to the link being in sleep state.
   2382 		 */
   2383 		if (wm_nvm_validate_checksum(sc))
   2384 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2385 	}
   2386 
   2387 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2388 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2389 	else {
   2390 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2391 		    sc->sc_nvm_wordsize);
   2392 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2393 			aprint_verbose("iNVM");
   2394 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2395 			aprint_verbose("FLASH(HW)");
   2396 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2397 			aprint_verbose("FLASH");
   2398 		else {
   2399 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2400 				eetype = "SPI";
   2401 			else
   2402 				eetype = "MicroWire";
   2403 			aprint_verbose("(%d address bits) %s EEPROM",
   2404 			    sc->sc_nvm_addrbits, eetype);
   2405 		}
   2406 	}
   2407 	wm_nvm_version(sc);
   2408 	aprint_verbose("\n");
   2409 
   2410 	/*
    2411 	 * XXX This is the first call of wm_gmii_setup_phytype(); the
    2412 	 * result might be incorrect.
   2413 	 */
   2414 	wm_gmii_setup_phytype(sc, 0, 0);
   2415 
   2416 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2417 	switch (sc->sc_type) {
   2418 	case WM_T_ICH8:
   2419 	case WM_T_ICH9:
   2420 	case WM_T_ICH10:
   2421 	case WM_T_PCH:
   2422 	case WM_T_PCH2:
   2423 	case WM_T_PCH_LPT:
   2424 	case WM_T_PCH_SPT:
   2425 	case WM_T_PCH_CNP:
   2426 		apme_mask = WUC_APME;
   2427 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2428 		if ((eeprom_data & apme_mask) != 0)
   2429 			sc->sc_flags |= WM_F_WOL;
   2430 		break;
   2431 	default:
   2432 		break;
   2433 	}
   2434 
   2435 	/* Reset the chip to a known state. */
   2436 	wm_reset(sc);
   2437 
   2438 	/*
   2439 	 * Check for I21[01] PLL workaround.
   2440 	 *
   2441 	 * Three cases:
   2442 	 * a) Chip is I211.
   2443 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2444 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2445 	 */
   2446 	if (sc->sc_type == WM_T_I211)
   2447 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2448 	if (sc->sc_type == WM_T_I210) {
   2449 		if (!wm_nvm_flash_presence_i210(sc))
   2450 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2451 		else if ((sc->sc_nvm_ver_major < 3)
   2452 		    || ((sc->sc_nvm_ver_major == 3)
   2453 			&& (sc->sc_nvm_ver_minor < 25))) {
   2454 			aprint_verbose_dev(sc->sc_dev,
   2455 			    "ROM image version %d.%d is older than 3.25\n",
   2456 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2457 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2458 		}
   2459 	}
   2460 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2461 		wm_pll_workaround_i210(sc);
   2462 
   2463 	wm_get_wakeup(sc);
   2464 
   2465 	/* Non-AMT based hardware can now take control from firmware */
   2466 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2467 		wm_get_hw_control(sc);
   2468 
   2469 	/*
   2470 	 * Read the Ethernet address from the EEPROM, if not first found
   2471 	 * in device properties.
   2472 	 */
   2473 	ea = prop_dictionary_get(dict, "mac-address");
   2474 	if (ea != NULL) {
   2475 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2476 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2477 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2478 	} else {
   2479 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2480 			aprint_error_dev(sc->sc_dev,
   2481 			    "unable to read Ethernet address\n");
   2482 			goto out;
   2483 		}
   2484 	}
   2485 
   2486 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2487 	    ether_sprintf(enaddr));
   2488 
   2489 	/*
   2490 	 * Read the config info from the EEPROM, and set up various
   2491 	 * bits in the control registers based on their contents.
   2492 	 */
   2493 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2494 	if (pn != NULL) {
   2495 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2496 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2497 	} else {
   2498 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2499 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2500 			goto out;
   2501 		}
   2502 	}
   2503 
   2504 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2505 	if (pn != NULL) {
   2506 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2507 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2508 	} else {
   2509 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2510 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2511 			goto out;
   2512 		}
   2513 	}
   2514 
   2515 	/* check for WM_F_WOL */
   2516 	switch (sc->sc_type) {
   2517 	case WM_T_82542_2_0:
   2518 	case WM_T_82542_2_1:
   2519 	case WM_T_82543:
   2520 		/* dummy? */
   2521 		eeprom_data = 0;
   2522 		apme_mask = NVM_CFG3_APME;
   2523 		break;
   2524 	case WM_T_82544:
   2525 		apme_mask = NVM_CFG2_82544_APM_EN;
   2526 		eeprom_data = cfg2;
   2527 		break;
   2528 	case WM_T_82546:
   2529 	case WM_T_82546_3:
   2530 	case WM_T_82571:
   2531 	case WM_T_82572:
   2532 	case WM_T_82573:
   2533 	case WM_T_82574:
   2534 	case WM_T_82583:
   2535 	case WM_T_80003:
   2536 	case WM_T_82575:
   2537 	case WM_T_82576:
   2538 		apme_mask = NVM_CFG3_APME;
   2539 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2540 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2541 		break;
   2542 	case WM_T_82580:
   2543 	case WM_T_I350:
   2544 	case WM_T_I354:
   2545 	case WM_T_I210:
   2546 	case WM_T_I211:
   2547 		apme_mask = NVM_CFG3_APME;
   2548 		wm_nvm_read(sc,
   2549 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2550 		    1, &eeprom_data);
   2551 		break;
   2552 	case WM_T_ICH8:
   2553 	case WM_T_ICH9:
   2554 	case WM_T_ICH10:
   2555 	case WM_T_PCH:
   2556 	case WM_T_PCH2:
   2557 	case WM_T_PCH_LPT:
   2558 	case WM_T_PCH_SPT:
   2559 	case WM_T_PCH_CNP:
    2560 		/* Already checked before wm_reset() */
   2561 		apme_mask = eeprom_data = 0;
   2562 		break;
   2563 	default: /* XXX 82540 */
   2564 		apme_mask = NVM_CFG3_APME;
   2565 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2566 		break;
   2567 	}
   2568 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2569 	if ((eeprom_data & apme_mask) != 0)
   2570 		sc->sc_flags |= WM_F_WOL;
   2571 
   2572 	/*
    2573 	 * We have the EEPROM settings; now apply the special cases
    2574 	 * where the EEPROM may be wrong or the board won't support
    2575 	 * wake on LAN on a particular port.
   2576 	 */
   2577 	switch (sc->sc_pcidevid) {
   2578 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2579 		sc->sc_flags &= ~WM_F_WOL;
   2580 		break;
   2581 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2582 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2583 		/* Wake events only supported on port A for dual fiber
   2584 		 * regardless of eeprom setting */
   2585 		if (sc->sc_funcid == 1)
   2586 			sc->sc_flags &= ~WM_F_WOL;
   2587 		break;
   2588 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2589 		/* If quad port adapter, disable WoL on all but port A */
   2590 		if (sc->sc_funcid != 0)
   2591 			sc->sc_flags &= ~WM_F_WOL;
   2592 		break;
   2593 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2594 		/* Wake events only supported on port A for dual fiber
   2595 		 * regardless of eeprom setting */
   2596 		if (sc->sc_funcid == 1)
   2597 			sc->sc_flags &= ~WM_F_WOL;
   2598 		break;
   2599 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2600 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2601 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2602 		/* If quad port adapter, disable WoL on all but port A */
   2603 		if (sc->sc_funcid != 0)
   2604 			sc->sc_flags &= ~WM_F_WOL;
   2605 		break;
   2606 	}
   2607 
   2608 	if (sc->sc_type >= WM_T_82575) {
   2609 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2610 			if ((sc->sc_type == WM_T_82575) ||
   2611 			    (sc->sc_type == WM_T_82576)) {
   2612 				/* Check NVM for autonegotiation */
   2613 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2614 				    != 0)
   2615 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2616 			}
   2617 			if ((sc->sc_type == WM_T_82575) ||
   2618 			    (sc->sc_type == WM_T_I350)) {
   2619 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2620 					sc->sc_flags |= WM_F_MAS;
   2621 			}
   2622 		}
   2623 	}
   2624 
   2625 	/*
   2626 	 * XXX need special handling for some multiple port cards
    2627 	 * to disable a particular port.
   2628 	 */
   2629 
   2630 	if (sc->sc_type >= WM_T_82544) {
   2631 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2632 		if (pn != NULL) {
   2633 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2634 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2635 		} else {
   2636 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2637 				aprint_error_dev(sc->sc_dev,
   2638 				    "unable to read SWDPIN\n");
   2639 				goto out;
   2640 			}
   2641 		}
   2642 	}
   2643 
   2644 	if (cfg1 & NVM_CFG1_ILOS)
   2645 		sc->sc_ctrl |= CTRL_ILOS;
   2646 
   2647 	/*
   2648 	 * XXX
    2649 	 * This code isn't correct because pins 2 and 3 are located
    2650 	 * in different positions on newer chips. Check all datasheets.
    2651 	 *
    2652 	 * Until this problem is resolved, only do this on chips < 82580.
   2653 	 */
   2654 	if (sc->sc_type <= WM_T_82580) {
   2655 		if (sc->sc_type >= WM_T_82544) {
   2656 			sc->sc_ctrl |=
   2657 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2658 			    CTRL_SWDPIO_SHIFT;
   2659 			sc->sc_ctrl |=
   2660 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2661 			    CTRL_SWDPINS_SHIFT;
   2662 		} else {
   2663 			sc->sc_ctrl |=
   2664 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2665 			    CTRL_SWDPIO_SHIFT;
   2666 		}
   2667 	}
   2668 
   2669 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2670 		wm_nvm_read(sc,
   2671 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2672 		    1, &nvmword);
   2673 		if (nvmword & NVM_CFG3_ILOS)
   2674 			sc->sc_ctrl |= CTRL_ILOS;
   2675 	}
   2676 
   2677 #if 0
   2678 	if (sc->sc_type >= WM_T_82544) {
   2679 		if (cfg1 & NVM_CFG1_IPS0)
   2680 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2681 		if (cfg1 & NVM_CFG1_IPS1)
   2682 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2683 		sc->sc_ctrl_ext |=
   2684 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2685 		    CTRL_EXT_SWDPIO_SHIFT;
   2686 		sc->sc_ctrl_ext |=
   2687 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2688 		    CTRL_EXT_SWDPINS_SHIFT;
   2689 	} else {
   2690 		sc->sc_ctrl_ext |=
   2691 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2692 		    CTRL_EXT_SWDPIO_SHIFT;
   2693 	}
   2694 #endif
   2695 
   2696 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2697 #if 0
   2698 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2699 #endif
   2700 
   2701 	if (sc->sc_type == WM_T_PCH) {
   2702 		uint16_t val;
   2703 
   2704 		/* Save the NVM K1 bit setting */
   2705 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2706 
   2707 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2708 			sc->sc_nvm_k1_enabled = 1;
   2709 		else
   2710 			sc->sc_nvm_k1_enabled = 0;
   2711 	}
   2712 
   2713 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2714 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2715 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2716 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2717 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2718 	    || sc->sc_type == WM_T_82573
   2719 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2720 		/* Copper only */
   2721 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2722 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2723 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2724 	    || (sc->sc_type == WM_T_I211)) {
   2725 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2726 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2727 		switch (link_mode) {
   2728 		case CTRL_EXT_LINK_MODE_1000KX:
   2729 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2730 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2731 			break;
   2732 		case CTRL_EXT_LINK_MODE_SGMII:
   2733 			if (wm_sgmii_uses_mdio(sc)) {
   2734 				aprint_normal_dev(sc->sc_dev,
   2735 				    "SGMII(MDIO)\n");
   2736 				sc->sc_flags |= WM_F_SGMII;
   2737 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2738 				break;
   2739 			}
   2740 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2741 			/*FALLTHROUGH*/
   2742 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2743 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2744 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2745 				if (link_mode
   2746 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2747 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2748 					sc->sc_flags |= WM_F_SGMII;
   2749 					aprint_verbose_dev(sc->sc_dev,
   2750 					    "SGMII\n");
   2751 				} else {
   2752 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2753 					aprint_verbose_dev(sc->sc_dev,
   2754 					    "SERDES\n");
   2755 				}
   2756 				break;
   2757 			}
   2758 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2759 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2760 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2761 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2762 				sc->sc_flags |= WM_F_SGMII;
   2763 			}
   2764 			/* Do not change link mode for 100BaseFX */
   2765 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2766 				break;
   2767 
   2768 			/* Change current link mode setting */
   2769 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2770 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2771 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2772 			else
   2773 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2774 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2775 			break;
   2776 		case CTRL_EXT_LINK_MODE_GMII:
   2777 		default:
   2778 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2779 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2780 			break;
   2781 		}
   2782 
    2784 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2785 			reg |= CTRL_EXT_I2C_ENA;
    2786 		else
    2787 			reg &= ~CTRL_EXT_I2C_ENA;
   2788 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2789 	} else if (sc->sc_type < WM_T_82543 ||
   2790 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2791 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2792 			aprint_error_dev(sc->sc_dev,
   2793 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2794 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2795 		}
   2796 	} else {
   2797 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2798 			aprint_error_dev(sc->sc_dev,
   2799 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2800 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2801 		}
   2802 	}
   2803 
   2804 	if (sc->sc_type >= WM_T_PCH2)
   2805 		sc->sc_flags |= WM_F_EEE;
   2806 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2807 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2808 		/* XXX: Need special handling for I354. (not yet) */
   2809 		if (sc->sc_type != WM_T_I354)
   2810 			sc->sc_flags |= WM_F_EEE;
   2811 	}
   2812 
   2813 	/* Set device properties (macflags) */
   2814 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2815 
   2816 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2817 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2818 
   2819 	/* Initialize the media structures accordingly. */
   2820 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2821 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2822 	else
   2823 		wm_tbi_mediainit(sc); /* All others */
   2824 
   2825 	ifp = &sc->sc_ethercom.ec_if;
   2826 	xname = device_xname(sc->sc_dev);
   2827 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2828 	ifp->if_softc = sc;
   2829 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2830 #ifdef WM_MPSAFE
   2831 	ifp->if_extflags = IFEF_MPSAFE;
   2832 #endif
   2833 	ifp->if_ioctl = wm_ioctl;
   2834 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2835 		ifp->if_start = wm_nq_start;
    2836 		/*
    2837 		 * When the number of CPUs is one and the controller can use
    2838 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2839 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2840 		 * other for link status changes.
    2841 		 * In this situation, wm_nq_transmit() is disadvantageous
    2842 		 * because of wm_select_txqueue() and pcq(9) overhead.
    2843 		 */
   2844 		if (wm_is_using_multiqueue(sc))
   2845 			ifp->if_transmit = wm_nq_transmit;
   2846 	} else {
   2847 		ifp->if_start = wm_start;
   2848 		/*
    2849 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2850 		 */
   2851 		if (wm_is_using_multiqueue(sc))
   2852 			ifp->if_transmit = wm_transmit;
   2853 	}
    2854 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2855 	ifp->if_init = wm_init;
   2856 	ifp->if_stop = wm_stop;
   2857 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2858 	IFQ_SET_READY(&ifp->if_snd);
   2859 
   2860 	/* Check for jumbo frame */
   2861 	switch (sc->sc_type) {
   2862 	case WM_T_82573:
   2863 		/* XXX limited to 9234 if ASPM is disabled */
   2864 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2865 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2866 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2867 		break;
   2868 	case WM_T_82571:
   2869 	case WM_T_82572:
   2870 	case WM_T_82574:
   2871 	case WM_T_82583:
   2872 	case WM_T_82575:
   2873 	case WM_T_82576:
   2874 	case WM_T_82580:
   2875 	case WM_T_I350:
   2876 	case WM_T_I354:
   2877 	case WM_T_I210:
   2878 	case WM_T_I211:
   2879 	case WM_T_80003:
   2880 	case WM_T_ICH9:
   2881 	case WM_T_ICH10:
   2882 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2883 	case WM_T_PCH_LPT:
   2884 	case WM_T_PCH_SPT:
   2885 	case WM_T_PCH_CNP:
   2886 		/* XXX limited to 9234 */
   2887 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2888 		break;
   2889 	case WM_T_PCH:
   2890 		/* XXX limited to 4096 */
   2891 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2892 		break;
   2893 	case WM_T_82542_2_0:
   2894 	case WM_T_82542_2_1:
   2895 	case WM_T_ICH8:
   2896 		/* No support for jumbo frame */
   2897 		break;
   2898 	default:
   2899 		/* ETHER_MAX_LEN_JUMBO */
   2900 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2901 		break;
   2902 	}
   2903 
    2904 	/* If we're an i82543 or greater, we can support VLANs. */
   2905 	if (sc->sc_type >= WM_T_82543) {
   2906 		sc->sc_ethercom.ec_capabilities |=
   2907 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2908 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2909 	}
   2910 
   2911 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2912 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2913 
   2914 	/*
    2915 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2916 	 * on i82543 and later.
   2917 	 */
   2918 	if (sc->sc_type >= WM_T_82543) {
   2919 		ifp->if_capabilities |=
   2920 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2921 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2922 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2923 		    IFCAP_CSUM_TCPv6_Tx |
   2924 		    IFCAP_CSUM_UDPv6_Tx;
   2925 	}
   2926 
   2927 	/*
   2928 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2929 	 *
   2930 	 *	82541GI (8086:1076) ... no
   2931 	 *	82572EI (8086:10b9) ... yes
   2932 	 */
   2933 	if (sc->sc_type >= WM_T_82571) {
   2934 		ifp->if_capabilities |=
   2935 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2936 	}
   2937 
   2938 	/*
    2939 	 * If we're an i82544 or greater (except i82547), we can do
   2940 	 * TCP segmentation offload.
   2941 	 */
   2942 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2943 		ifp->if_capabilities |= IFCAP_TSOv4;
   2944 	}
   2945 
   2946 	if (sc->sc_type >= WM_T_82571) {
   2947 		ifp->if_capabilities |= IFCAP_TSOv6;
   2948 	}
   2949 
   2950 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2951 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2952 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2953 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2954 
   2955 #ifdef WM_MPSAFE
   2956 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2957 #else
   2958 	sc->sc_core_lock = NULL;
   2959 #endif
   2960 
   2961 	/* Attach the interface. */
   2962 	error = if_initialize(ifp);
   2963 	if (error != 0) {
   2964 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2965 		    error);
   2966 		return; /* Error */
   2967 	}
   2968 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2969 	ether_ifattach(ifp, enaddr);
   2970 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2971 	if_register(ifp);
   2972 
   2973 #ifdef WM_EVENT_COUNTERS
   2974 	/* Attach event counters. */
   2975 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2976 	    NULL, xname, "linkintr");
   2977 
   2978 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2979 	    NULL, xname, "tx_xoff");
   2980 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2981 	    NULL, xname, "tx_xon");
   2982 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2983 	    NULL, xname, "rx_xoff");
   2984 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2985 	    NULL, xname, "rx_xon");
   2986 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2987 	    NULL, xname, "rx_macctl");
   2988 #endif /* WM_EVENT_COUNTERS */
   2989 
   2990 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2991 		pmf_class_network_register(self, ifp);
   2992 	else
   2993 		aprint_error_dev(self, "couldn't establish power handler\n");
   2994 
   2995 	sc->sc_flags |= WM_F_ATTACHED;
   2996 out:
   2997 	return;
   2998 }
   2999 
   3000 /* The detach function (ca_detach) */
   3001 static int
   3002 wm_detach(device_t self, int flags __unused)
   3003 {
   3004 	struct wm_softc *sc = device_private(self);
   3005 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3006 	int i;
   3007 
   3008 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3009 		return 0;
   3010 
   3011 	/* Stop the interface. Callouts are stopped in it. */
   3012 	wm_stop(ifp, 1);
   3013 
   3014 	pmf_device_deregister(self);
   3015 
   3016 #ifdef WM_EVENT_COUNTERS
   3017 	evcnt_detach(&sc->sc_ev_linkintr);
   3018 
   3019 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3020 	evcnt_detach(&sc->sc_ev_tx_xon);
   3021 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3022 	evcnt_detach(&sc->sc_ev_rx_xon);
   3023 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3024 #endif /* WM_EVENT_COUNTERS */
   3025 
   3026 	/* Tell the firmware about the release */
   3027 	WM_CORE_LOCK(sc);
   3028 	wm_release_manageability(sc);
   3029 	wm_release_hw_control(sc);
   3030 	wm_enable_wakeup(sc);
   3031 	WM_CORE_UNLOCK(sc);
   3032 
   3033 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3034 
   3035 	/* Delete all remaining media. */
   3036 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3037 
   3038 	ether_ifdetach(ifp);
   3039 	if_detach(ifp);
   3040 	if_percpuq_destroy(sc->sc_ipq);
   3041 
   3042 	/* Unload RX dmamaps and free mbufs */
   3043 	for (i = 0; i < sc->sc_nqueues; i++) {
   3044 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3045 		mutex_enter(rxq->rxq_lock);
   3046 		wm_rxdrain(rxq);
   3047 		mutex_exit(rxq->rxq_lock);
   3048 	}
   3049 	/* Must unlock here */
   3050 
   3051 	/* Disestablish the interrupt handler */
   3052 	for (i = 0; i < sc->sc_nintrs; i++) {
   3053 		if (sc->sc_ihs[i] != NULL) {
   3054 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3055 			sc->sc_ihs[i] = NULL;
   3056 		}
   3057 	}
   3058 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3059 
   3060 	wm_free_txrx_queues(sc);
   3061 
   3062 	/* Unmap the registers */
   3063 	if (sc->sc_ss) {
   3064 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3065 		sc->sc_ss = 0;
   3066 	}
   3067 	if (sc->sc_ios) {
   3068 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3069 		sc->sc_ios = 0;
   3070 	}
   3071 	if (sc->sc_flashs) {
   3072 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3073 		sc->sc_flashs = 0;
   3074 	}
   3075 
   3076 	if (sc->sc_core_lock)
   3077 		mutex_obj_free(sc->sc_core_lock);
   3078 	if (sc->sc_ich_phymtx)
   3079 		mutex_obj_free(sc->sc_ich_phymtx);
   3080 	if (sc->sc_ich_nvmmtx)
   3081 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3082 
   3083 	return 0;
   3084 }
   3085 
   3086 static bool
   3087 wm_suspend(device_t self, const pmf_qual_t *qual)
   3088 {
   3089 	struct wm_softc *sc = device_private(self);
   3090 
   3091 	wm_release_manageability(sc);
   3092 	wm_release_hw_control(sc);
   3093 	wm_enable_wakeup(sc);
   3094 
   3095 	return true;
   3096 }
   3097 
   3098 static bool
   3099 wm_resume(device_t self, const pmf_qual_t *qual)
   3100 {
   3101 	struct wm_softc *sc = device_private(self);
   3102 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3103 	pcireg_t reg;
   3104 	char buf[256];
   3105 
   3106 	reg = CSR_READ(sc, WMREG_WUS);
   3107 	if (reg != 0) {
   3108 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3109 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3110 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3111 	}
   3112 
   3113 	if (sc->sc_type >= WM_T_PCH2)
   3114 		wm_resume_workarounds_pchlan(sc);
   3115 	if ((ifp->if_flags & IFF_UP) == 0) {
   3116 		wm_reset(sc);
   3117 		/* Non-AMT based hardware can now take control from firmware */
   3118 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3119 			wm_get_hw_control(sc);
   3120 		wm_init_manageability(sc);
   3121 	} else {
   3122 		/*
   3123 		 * We called pmf_class_network_register(), so if_init() is
   3124 		 * automatically called when IFF_UP. wm_reset(),
   3125 		 * wm_get_hw_control() and wm_init_manageability() are called
   3126 		 * via wm_init().
   3127 		 */
   3128 	}
   3129 
   3130 	return true;
   3131 }
   3132 
   3133 /*
   3134  * wm_watchdog:		[ifnet interface function]
   3135  *
   3136  *	Watchdog timer handler.
   3137  */
   3138 static void
   3139 wm_watchdog(struct ifnet *ifp)
   3140 {
   3141 	int qid;
   3142 	struct wm_softc *sc = ifp->if_softc;
    3143 	uint16_t hang_queue = 0; /* Bitmask of hung queues; max is 82576's 16. */
   3144 
   3145 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3146 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3147 
   3148 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3149 	}
   3150 
    3151 	/* If any of the queues hung up, reset the interface. */
   3152 	if (hang_queue != 0) {
   3153 		(void)wm_init(ifp);
   3154 
   3155 		/*
    3156 		 * There is still some upper layer processing which calls
    3157 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3158 		 */
   3159 		/* Try to get more packets going. */
   3160 		ifp->if_start(ifp);
   3161 	}
   3162 }
   3163 
   3164 
   3165 static void
   3166 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3167 {
   3168 
   3169 	mutex_enter(txq->txq_lock);
   3170 	if (txq->txq_sending &&
   3171 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3172 		wm_watchdog_txq_locked(ifp, txq, hang);
   3173 
   3174 	mutex_exit(txq->txq_lock);
   3175 }
   3176 
   3177 static void
   3178 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3179     uint16_t *hang)
   3180 {
   3181 	struct wm_softc *sc = ifp->if_softc;
   3182 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3183 
   3184 	KASSERT(mutex_owned(txq->txq_lock));
   3185 
   3186 	/*
   3187 	 * Since we're using delayed interrupts, sweep up
   3188 	 * before we report an error.
   3189 	 */
   3190 	wm_txeof(txq, UINT_MAX);
   3191 
   3192 	if (txq->txq_sending)
   3193 		*hang |= __BIT(wmq->wmq_id);
   3194 
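         	/*
         	 * If every Tx descriptor is free, the queue actually drained;
         	 * we most likely just missed the completion interrupt.
         	 */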
   3195 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3196 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3197 		    device_xname(sc->sc_dev));
   3198 	} else {
   3199 #ifdef WM_DEBUG
   3200 		int i, j;
   3201 		struct wm_txsoft *txs;
   3202 #endif
   3203 		log(LOG_ERR,
   3204 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3205 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3206 		    txq->txq_next);
   3207 		ifp->if_oerrors++;
   3208 #ifdef WM_DEBUG
   3209 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3210 		    i = WM_NEXTTXS(txq, i)) {
   3211 			txs = &txq->txq_soft[i];
   3212 			printf("txs %d tx %d -> %d\n",
   3213 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3214 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3215 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3216 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3217 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3218 					printf("\t %#08x%08x\n",
   3219 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3220 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3221 				} else {
   3222 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3223 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3224 					    txq->txq_descs[j].wtx_addr.wa_low);
   3225 					printf("\t %#04x%02x%02x%08x\n",
   3226 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3227 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3228 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3229 					    txq->txq_descs[j].wtx_cmdlen);
   3230 				}
   3231 				if (j == txs->txs_lastdesc)
   3232 					break;
   3233 			}
   3234 		}
   3235 #endif
   3236 	}
   3237 }
   3238 
   3239 /*
   3240  * wm_tick:
   3241  *
   3242  *	One second timer, used to check link status, sweep up
   3243  *	completed transmit jobs, etc.
   3244  */
   3245 static void
   3246 wm_tick(void *arg)
   3247 {
   3248 	struct wm_softc *sc = arg;
   3249 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3250 #ifndef WM_MPSAFE
   3251 	int s = splnet();
   3252 #endif
   3253 
   3254 	WM_CORE_LOCK(sc);
   3255 
   3256 	if (sc->sc_core_stopping) {
   3257 		WM_CORE_UNLOCK(sc);
   3258 #ifndef WM_MPSAFE
   3259 		splx(s);
   3260 #endif
   3261 		return;
   3262 	}
   3263 
   3264 	if (sc->sc_type >= WM_T_82542_2_1) {
   3265 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3266 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3267 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3268 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3269 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3270 	}
   3271 
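         	/*
         	 * These statistics registers are clear-on-read, so each read
         	 * below accumulates only the delta since the previous tick.
         	 */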
   3272 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3273 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3274 	    + CSR_READ(sc, WMREG_CRCERRS)
   3275 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3276 	    + CSR_READ(sc, WMREG_SYMERRC)
   3277 	    + CSR_READ(sc, WMREG_RXERRC)
   3278 	    + CSR_READ(sc, WMREG_SEC)
   3279 	    + CSR_READ(sc, WMREG_CEXTERR)
   3280 	    + CSR_READ(sc, WMREG_RLEC);
    3281 	/*
    3282 	 * WMREG_RNBC is incremented when there are no buffers available in
    3283 	 * host memory. It is not the number of dropped packets, because the
    3284 	 * Ethernet controller can still receive packets in that case if
    3285 	 * there is space in the PHY's FIFO.
    3286 	 *
    3287 	 * If you want to track WMREG_RNBC, use something like your own
    3288 	 * EVCNT instead of if_iqdrops.
    3289 	 */
   3290 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3291 
   3292 	if (sc->sc_flags & WM_F_HAS_MII)
   3293 		mii_tick(&sc->sc_mii);
   3294 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3295 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3296 		wm_serdes_tick(sc);
   3297 	else
   3298 		wm_tbi_tick(sc);
   3299 
   3300 	WM_CORE_UNLOCK(sc);
   3301 
   3302 	wm_watchdog(ifp);
   3303 
   3304 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3305 }
   3306 
   3307 static int
   3308 wm_ifflags_cb(struct ethercom *ec)
   3309 {
   3310 	struct ifnet *ifp = &ec->ec_if;
   3311 	struct wm_softc *sc = ifp->if_softc;
   3312 	u_short iffchange;
   3313 	int ecchange;
   3314 	bool needreset = false;
   3315 	int rc = 0;
   3316 
   3317 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3318 		device_xname(sc->sc_dev), __func__));
   3319 
   3320 	WM_CORE_LOCK(sc);
   3321 
   3322 	/*
   3323 	 * Check for if_flags.
   3324 	 * Main usage is to prevent linkdown when opening bpf.
   3325 	 */
   3326 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3327 	sc->sc_if_flags = ifp->if_flags;
   3328 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3329 		needreset = true;
   3330 		goto ec;
   3331 	}
   3332 
   3333 	/* iff related updates */
   3334 	if ((iffchange & IFF_PROMISC) != 0)
   3335 		wm_set_filter(sc);
   3336 
   3337 	wm_set_vlan(sc);
   3338 
   3339 ec:
   3340 	/* Check for ec_capenable. */
   3341 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3342 	sc->sc_ec_capenable = ec->ec_capenable;
   3343 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3344 		needreset = true;
   3345 		goto out;
   3346 	}
   3347 
   3348 	/* ec related updates */
   3349 	wm_set_eee(sc);
   3350 
   3351 out:
   3352 	if (needreset)
   3353 		rc = ENETRESET;
   3354 	WM_CORE_UNLOCK(sc);
   3355 
   3356 	return rc;
   3357 }
   3358 
   3359 /*
   3360  * wm_ioctl:		[ifnet interface function]
   3361  *
   3362  *	Handle control requests from the operator.
   3363  */
   3364 static int
   3365 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3366 {
   3367 	struct wm_softc *sc = ifp->if_softc;
   3368 	struct ifreq *ifr = (struct ifreq *)data;
   3369 	struct ifaddr *ifa = (struct ifaddr *)data;
   3370 	struct sockaddr_dl *sdl;
   3371 	int s, error;
   3372 
   3373 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3374 		device_xname(sc->sc_dev), __func__));
   3375 
   3376 #ifndef WM_MPSAFE
   3377 	s = splnet();
   3378 #endif
   3379 	switch (cmd) {
   3380 	case SIOCSIFMEDIA:
   3381 		WM_CORE_LOCK(sc);
   3382 		/* Flow control requires full-duplex mode. */
   3383 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3384 		    (ifr->ifr_media & IFM_FDX) == 0)
   3385 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3386 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3387 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3388 				/* We can do both TXPAUSE and RXPAUSE. */
   3389 				ifr->ifr_media |=
   3390 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3391 			}
   3392 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3393 		}
   3394 		WM_CORE_UNLOCK(sc);
   3395 #ifdef WM_MPSAFE
   3396 		s = splnet();
   3397 #endif
   3398 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3399 #ifdef WM_MPSAFE
   3400 		splx(s);
   3401 #endif
   3402 		break;
   3403 	case SIOCINITIFADDR:
   3404 		WM_CORE_LOCK(sc);
   3405 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3406 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3407 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3408 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3409 			/* Unicast address is the first multicast entry */
   3410 			wm_set_filter(sc);
   3411 			error = 0;
   3412 			WM_CORE_UNLOCK(sc);
   3413 			break;
   3414 		}
   3415 		WM_CORE_UNLOCK(sc);
   3416 		/*FALLTHROUGH*/
   3417 	default:
   3418 #ifdef WM_MPSAFE
   3419 		s = splnet();
   3420 #endif
   3421 		/* It may call wm_start, so unlock here */
   3422 		error = ether_ioctl(ifp, cmd, data);
   3423 #ifdef WM_MPSAFE
   3424 		splx(s);
   3425 #endif
   3426 		if (error != ENETRESET)
   3427 			break;
   3428 
   3429 		error = 0;
   3430 
   3431 		if (cmd == SIOCSIFCAP)
   3432 			error = (*ifp->if_init)(ifp);
   3433 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3434 			;
   3435 		else if (ifp->if_flags & IFF_RUNNING) {
   3436 			/*
   3437 			 * Multicast list has changed; set the hardware filter
   3438 			 * accordingly.
   3439 			 */
   3440 			WM_CORE_LOCK(sc);
   3441 			wm_set_filter(sc);
   3442 			WM_CORE_UNLOCK(sc);
   3443 		}
   3444 		break;
   3445 	}
   3446 
   3447 #ifndef WM_MPSAFE
   3448 	splx(s);
   3449 #endif
   3450 	return error;
   3451 }
   3452 
   3453 /* MAC address related */
   3454 
   3455 /*
    3456  * Get the offset of the MAC address and return it.
    3457  * If an error occurred, use offset 0.
   3458  */
   3459 static uint16_t
   3460 wm_check_alt_mac_addr(struct wm_softc *sc)
   3461 {
   3462 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3463 	uint16_t offset = NVM_OFF_MACADDR;
   3464 
   3465 	/* Try to read alternative MAC address pointer */
   3466 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3467 		return 0;
   3468 
    3469 	/* Check whether the pointer is valid or not. */
   3470 	if ((offset == 0x0000) || (offset == 0xffff))
   3471 		return 0;
   3472 
   3473 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3474 	/*
    3475 	 * Check whether the alternative MAC address is valid or not.
    3476 	 * Some cards have a non-0xffff pointer but don't actually use
    3477 	 * an alternative MAC address.
    3478 	 *
    3479 	 * Check that the group (broadcast/multicast) bit is clear.
    3480 	 */
   3481 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3482 		if (((myea[0] & 0xff) & 0x01) == 0)
   3483 			return offset; /* Found */
   3484 
   3485 	/* Not found */
   3486 	return 0;
   3487 }
   3488 
   3489 static int
   3490 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3491 {
   3492 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3493 	uint16_t offset = NVM_OFF_MACADDR;
   3494 	int do_invert = 0;
   3495 
   3496 	switch (sc->sc_type) {
   3497 	case WM_T_82580:
   3498 	case WM_T_I350:
   3499 	case WM_T_I354:
   3500 		/* EEPROM Top Level Partitioning */
   3501 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3502 		break;
   3503 	case WM_T_82571:
   3504 	case WM_T_82575:
   3505 	case WM_T_82576:
   3506 	case WM_T_80003:
   3507 	case WM_T_I210:
   3508 	case WM_T_I211:
   3509 		offset = wm_check_alt_mac_addr(sc);
   3510 		if (offset == 0)
   3511 			if ((sc->sc_funcid & 0x01) == 1)
   3512 				do_invert = 1;
   3513 		break;
   3514 	default:
   3515 		if ((sc->sc_funcid & 0x01) == 1)
   3516 			do_invert = 1;
   3517 		break;
   3518 	}
   3519 
   3520 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3521 		goto bad;
   3522 
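         	/*
         	 * Each 16-bit NVM word holds two octets of the MAC address in
         	 * little-endian order: word 0 holds octets 0-1, word 1 holds
         	 * octets 2-3 and word 2 holds octets 4-5.
         	 */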
   3523 	enaddr[0] = myea[0] & 0xff;
   3524 	enaddr[1] = myea[0] >> 8;
   3525 	enaddr[2] = myea[1] & 0xff;
   3526 	enaddr[3] = myea[1] >> 8;
   3527 	enaddr[4] = myea[2] & 0xff;
   3528 	enaddr[5] = myea[2] >> 8;
   3529 
   3530 	/*
   3531 	 * Toggle the LSB of the MAC address on the second port
   3532 	 * of some dual port cards.
   3533 	 */
   3534 	if (do_invert != 0)
   3535 		enaddr[5] ^= 1;
   3536 
   3537 	return 0;
   3538 
   3539  bad:
   3540 	return -1;
   3541 }
   3542 
   3543 /*
   3544  * wm_set_ral:
   3545  *
    3546  *	Set an entry in the receive address list.
   3547  */
   3548 static void
   3549 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3550 {
   3551 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3552 	uint32_t wlock_mac;
   3553 	int rv;
   3554 
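         	/*
         	 * RAL holds the low 32 bits of the MAC address, RAH the high
         	 * 16 bits plus the Address Valid (RAL_AV) bit. A NULL enaddr
         	 * clears the slot.
         	 */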
   3555 	if (enaddr != NULL) {
   3556 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3557 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3558 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3559 		ral_hi |= RAL_AV;
   3560 	} else {
   3561 		ral_lo = 0;
   3562 		ral_hi = 0;
   3563 	}
   3564 
   3565 	switch (sc->sc_type) {
   3566 	case WM_T_82542_2_0:
   3567 	case WM_T_82542_2_1:
   3568 	case WM_T_82543:
   3569 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3570 		CSR_WRITE_FLUSH(sc);
   3571 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3572 		CSR_WRITE_FLUSH(sc);
   3573 		break;
   3574 	case WM_T_PCH2:
   3575 	case WM_T_PCH_LPT:
   3576 	case WM_T_PCH_SPT:
   3577 	case WM_T_PCH_CNP:
   3578 		if (idx == 0) {
   3579 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3580 			CSR_WRITE_FLUSH(sc);
   3581 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3582 			CSR_WRITE_FLUSH(sc);
   3583 			return;
   3584 		}
   3585 		if (sc->sc_type != WM_T_PCH2) {
   3586 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3587 			    FWSM_WLOCK_MAC);
   3588 			addrl = WMREG_SHRAL(idx - 1);
   3589 			addrh = WMREG_SHRAH(idx - 1);
   3590 		} else {
   3591 			wlock_mac = 0;
   3592 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3593 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3594 		}
   3595 
   3596 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3597 			rv = wm_get_swflag_ich8lan(sc);
   3598 			if (rv != 0)
   3599 				return;
   3600 			CSR_WRITE(sc, addrl, ral_lo);
   3601 			CSR_WRITE_FLUSH(sc);
   3602 			CSR_WRITE(sc, addrh, ral_hi);
   3603 			CSR_WRITE_FLUSH(sc);
   3604 			wm_put_swflag_ich8lan(sc);
   3605 		}
   3606 
   3607 		break;
   3608 	default:
   3609 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3610 		CSR_WRITE_FLUSH(sc);
   3611 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3612 		CSR_WRITE_FLUSH(sc);
   3613 		break;
   3614 	}
   3615 }
   3616 
   3617 /*
   3618  * wm_mchash:
   3619  *
   3620  *	Compute the hash of the multicast address for the 4096-bit
   3621  *	multicast filter.
   3622  */
   3623 static uint32_t
   3624 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3625 {
   3626 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3627 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3628 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3629 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3630 	uint32_t hash;
   3631 
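         	/*
         	 * The hash is computed from the two most significant octets of
         	 * the address; sc_mchash_type selects one of four bit windows.
         	 * ICH/PCH variants use a 10-bit hash (1024-bit table), all
         	 * others a 12-bit hash (4096-bit table).
         	 */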
   3632 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3633 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3634 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3635 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3636 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3637 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3638 		return (hash & 0x3ff);
   3639 	}
   3640 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3641 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3642 
   3643 	return (hash & 0xfff);
   3644 }
   3645 
    3646 /*
    3647  * wm_rar_count:
    3648  *	Return the number of entries in the receive address list.
    3649  */
   3650 static int
   3651 wm_rar_count(struct wm_softc *sc)
   3652 {
   3653 	int size;
   3654 
   3655 	switch (sc->sc_type) {
   3656 	case WM_T_ICH8:
    3657 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3658 		break;
   3659 	case WM_T_ICH9:
   3660 	case WM_T_ICH10:
   3661 	case WM_T_PCH:
   3662 		size = WM_RAL_TABSIZE_ICH8;
   3663 		break;
   3664 	case WM_T_PCH2:
   3665 		size = WM_RAL_TABSIZE_PCH2;
   3666 		break;
   3667 	case WM_T_PCH_LPT:
   3668 	case WM_T_PCH_SPT:
   3669 	case WM_T_PCH_CNP:
   3670 		size = WM_RAL_TABSIZE_PCH_LPT;
   3671 		break;
   3672 	case WM_T_82575:
   3673 	case WM_T_I210:
   3674 	case WM_T_I211:
   3675 		size = WM_RAL_TABSIZE_82575;
   3676 		break;
   3677 	case WM_T_82576:
   3678 	case WM_T_82580:
   3679 		size = WM_RAL_TABSIZE_82576;
   3680 		break;
   3681 	case WM_T_I350:
   3682 	case WM_T_I354:
   3683 		size = WM_RAL_TABSIZE_I350;
   3684 		break;
   3685 	default:
   3686 		size = WM_RAL_TABSIZE;
   3687 	}
   3688 
   3689 	return size;
   3690 }
   3691 
   3692 /*
   3693  * wm_set_filter:
   3694  *
   3695  *	Set up the receive filter.
   3696  */
   3697 static void
   3698 wm_set_filter(struct wm_softc *sc)
   3699 {
   3700 	struct ethercom *ec = &sc->sc_ethercom;
   3701 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3702 	struct ether_multi *enm;
   3703 	struct ether_multistep step;
   3704 	bus_addr_t mta_reg;
   3705 	uint32_t hash, reg, bit;
   3706 	int i, size, ralmax;
   3707 
   3708 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3709 		device_xname(sc->sc_dev), __func__));
   3710 
   3711 	if (sc->sc_type >= WM_T_82544)
   3712 		mta_reg = WMREG_CORDOVA_MTA;
   3713 	else
   3714 		mta_reg = WMREG_MTA;
   3715 
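         	/*
         	 * Start from a clean slate: clear broadcast accept (BAM),
         	 * unicast promiscuous (UPE) and multicast promiscuous (MPE),
         	 * then turn them back on below as required.
         	 */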
   3716 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3717 
   3718 	if (ifp->if_flags & IFF_BROADCAST)
   3719 		sc->sc_rctl |= RCTL_BAM;
   3720 	if (ifp->if_flags & IFF_PROMISC) {
   3721 		sc->sc_rctl |= RCTL_UPE;
   3722 		ETHER_LOCK(ec);
   3723 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3724 		ETHER_UNLOCK(ec);
   3725 		goto allmulti;
   3726 	}
   3727 
   3728 	/*
   3729 	 * Set the station address in the first RAL slot, and
   3730 	 * clear the remaining slots.
   3731 	 */
   3732 	size = wm_rar_count(sc);
   3733 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3734 
   3735 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3736 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3737 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3738 		switch (i) {
   3739 		case 0:
   3740 			/* We can use all entries */
   3741 			ralmax = size;
   3742 			break;
   3743 		case 1:
   3744 			/* Only RAR[0] */
   3745 			ralmax = 1;
   3746 			break;
   3747 		default:
   3748 			/* Available SHRA + RAR[0] */
   3749 			ralmax = i + 1;
   3750 		}
   3751 	} else
   3752 		ralmax = size;
   3753 	for (i = 1; i < size; i++) {
   3754 		if (i < ralmax)
   3755 			wm_set_ral(sc, NULL, i);
   3756 	}
   3757 
   3758 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3759 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3760 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3761 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3762 		size = WM_ICH8_MC_TABSIZE;
   3763 	else
   3764 		size = WM_MC_TABSIZE;
   3765 	/* Clear out the multicast table. */
   3766 	for (i = 0; i < size; i++) {
   3767 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3768 		CSR_WRITE_FLUSH(sc);
   3769 	}
   3770 
   3771 	ETHER_LOCK(ec);
   3772 	ETHER_FIRST_MULTI(step, ec, enm);
   3773 	while (enm != NULL) {
   3774 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3775 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3776 			ETHER_UNLOCK(ec);
   3777 			/*
   3778 			 * We must listen to a range of multicast addresses.
   3779 			 * For now, just accept all multicasts, rather than
   3780 			 * trying to set only those filter bits needed to match
   3781 			 * the range.  (At this time, the only use of address
   3782 			 * ranges is for IP multicast routing, for which the
   3783 			 * range is big enough to require all bits set.)
   3784 			 */
   3785 			goto allmulti;
   3786 		}
   3787 
   3788 		hash = wm_mchash(sc, enm->enm_addrlo);
   3789 
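         		/*
         		 * The upper hash bits select one of the 32-bit MTA
         		 * registers (32 on ICH/PCH, 128 elsewhere); the low
         		 * five bits select the bit within that register.
         		 */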
   3790 		reg = (hash >> 5);
   3791 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3792 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3793 		    || (sc->sc_type == WM_T_PCH2)
   3794 		    || (sc->sc_type == WM_T_PCH_LPT)
   3795 		    || (sc->sc_type == WM_T_PCH_SPT)
   3796 		    || (sc->sc_type == WM_T_PCH_CNP))
   3797 			reg &= 0x1f;
   3798 		else
   3799 			reg &= 0x7f;
   3800 		bit = hash & 0x1f;
   3801 
   3802 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3803 		hash |= 1U << bit;
   3804 
   3805 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3806 			/*
   3807 			 * 82544 Errata 9: Certain register cannot be written
   3808 			 * with particular alignments in PCI-X bus operation
   3809 			 * (FCAH, MTA and VFTA).
   3810 			 */
   3811 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3812 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3813 			CSR_WRITE_FLUSH(sc);
   3814 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3815 			CSR_WRITE_FLUSH(sc);
   3816 		} else {
   3817 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3818 			CSR_WRITE_FLUSH(sc);
   3819 		}
   3820 
   3821 		ETHER_NEXT_MULTI(step, enm);
   3822 	}
   3823 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3824 	ETHER_UNLOCK(ec);
   3825 
   3826 	goto setit;
   3827 
   3828  allmulti:
   3829 	sc->sc_rctl |= RCTL_MPE;
   3830 
   3831  setit:
   3832 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3833 }
   3834 
   3835 /* Reset and init related */
   3836 
   3837 static void
   3838 wm_set_vlan(struct wm_softc *sc)
   3839 {
   3840 
   3841 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3842 		device_xname(sc->sc_dev), __func__));
   3843 
   3844 	/* Deal with VLAN enables. */
   3845 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3846 		sc->sc_ctrl |= CTRL_VME;
   3847 	else
   3848 		sc->sc_ctrl &= ~CTRL_VME;
   3849 
   3850 	/* Write the control registers. */
   3851 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3852 }
   3853 
   3854 static void
   3855 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3856 {
   3857 	uint32_t gcr;
   3858 	pcireg_t ctrl2;
   3859 
   3860 	gcr = CSR_READ(sc, WMREG_GCR);
   3861 
   3862 	/* Only take action if timeout value is defaulted to 0 */
   3863 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3864 		goto out;
   3865 
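         	/*
         	 * On parts without the v2 PCIe capability the timeout lives in
         	 * GCR; on v2 parts it is set via the PCIe Device Control 2
         	 * register in config space instead.
         	 */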
   3866 	if ((gcr & GCR_CAP_VER2) == 0) {
   3867 		gcr |= GCR_CMPL_TMOUT_10MS;
   3868 		goto out;
   3869 	}
   3870 
   3871 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3872 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3873 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3874 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3875 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3876 
   3877 out:
   3878 	/* Disable completion timeout resend */
   3879 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3880 
   3881 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3882 }
   3883 
   3884 void
   3885 wm_get_auto_rd_done(struct wm_softc *sc)
   3886 {
   3887 	int i;
   3888 
    3889 	/* Wait for eeprom to reload */
   3890 	switch (sc->sc_type) {
   3891 	case WM_T_82571:
   3892 	case WM_T_82572:
   3893 	case WM_T_82573:
   3894 	case WM_T_82574:
   3895 	case WM_T_82583:
   3896 	case WM_T_82575:
   3897 	case WM_T_82576:
   3898 	case WM_T_82580:
   3899 	case WM_T_I350:
   3900 	case WM_T_I354:
   3901 	case WM_T_I210:
   3902 	case WM_T_I211:
   3903 	case WM_T_80003:
   3904 	case WM_T_ICH8:
   3905 	case WM_T_ICH9:
   3906 		for (i = 0; i < 10; i++) {
   3907 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3908 				break;
   3909 			delay(1000);
   3910 		}
   3911 		if (i == 10) {
   3912 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3913 			    "complete\n", device_xname(sc->sc_dev));
   3914 		}
   3915 		break;
   3916 	default:
   3917 		break;
   3918 	}
   3919 }
   3920 
   3921 void
   3922 wm_lan_init_done(struct wm_softc *sc)
   3923 {
   3924 	uint32_t reg = 0;
   3925 	int i;
   3926 
   3927 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3928 		device_xname(sc->sc_dev), __func__));
   3929 
   3930 	/* Wait for eeprom to reload */
   3931 	switch (sc->sc_type) {
   3932 	case WM_T_ICH10:
   3933 	case WM_T_PCH:
   3934 	case WM_T_PCH2:
   3935 	case WM_T_PCH_LPT:
   3936 	case WM_T_PCH_SPT:
   3937 	case WM_T_PCH_CNP:
   3938 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3939 			reg = CSR_READ(sc, WMREG_STATUS);
   3940 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3941 				break;
   3942 			delay(100);
   3943 		}
   3944 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3945 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3946 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3947 		}
   3948 		break;
   3949 	default:
   3950 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3951 		    __func__);
   3952 		break;
   3953 	}
   3954 
   3955 	reg &= ~STATUS_LAN_INIT_DONE;
   3956 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3957 }
   3958 
   3959 void
   3960 wm_get_cfg_done(struct wm_softc *sc)
   3961 {
   3962 	int mask;
   3963 	uint32_t reg;
   3964 	int i;
   3965 
   3966 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3967 		device_xname(sc->sc_dev), __func__));
   3968 
   3969 	/* Wait for eeprom to reload */
   3970 	switch (sc->sc_type) {
   3971 	case WM_T_82542_2_0:
   3972 	case WM_T_82542_2_1:
   3973 		/* null */
   3974 		break;
   3975 	case WM_T_82543:
   3976 	case WM_T_82544:
   3977 	case WM_T_82540:
   3978 	case WM_T_82545:
   3979 	case WM_T_82545_3:
   3980 	case WM_T_82546:
   3981 	case WM_T_82546_3:
   3982 	case WM_T_82541:
   3983 	case WM_T_82541_2:
   3984 	case WM_T_82547:
   3985 	case WM_T_82547_2:
   3986 	case WM_T_82573:
   3987 	case WM_T_82574:
   3988 	case WM_T_82583:
   3989 		/* generic */
   3990 		delay(10*1000);
   3991 		break;
   3992 	case WM_T_80003:
   3993 	case WM_T_82571:
   3994 	case WM_T_82572:
   3995 	case WM_T_82575:
   3996 	case WM_T_82576:
   3997 	case WM_T_82580:
   3998 	case WM_T_I350:
   3999 	case WM_T_I354:
   4000 	case WM_T_I210:
   4001 	case WM_T_I211:
   4002 		if (sc->sc_type == WM_T_82571) {
   4003 			/* Only 82571 shares port 0 */
   4004 			mask = EEMNGCTL_CFGDONE_0;
   4005 		} else
   4006 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4007 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4008 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4009 				break;
   4010 			delay(1000);
   4011 		}
   4012 		if (i >= WM_PHY_CFG_TIMEOUT)
   4013 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4014 				device_xname(sc->sc_dev), __func__));
   4015 		break;
   4016 	case WM_T_ICH8:
   4017 	case WM_T_ICH9:
   4018 	case WM_T_ICH10:
   4019 	case WM_T_PCH:
   4020 	case WM_T_PCH2:
   4021 	case WM_T_PCH_LPT:
   4022 	case WM_T_PCH_SPT:
   4023 	case WM_T_PCH_CNP:
   4024 		delay(10*1000);
   4025 		if (sc->sc_type >= WM_T_ICH10)
   4026 			wm_lan_init_done(sc);
   4027 		else
   4028 			wm_get_auto_rd_done(sc);
   4029 
   4030 		/* Clear PHY Reset Asserted bit */
   4031 		reg = CSR_READ(sc, WMREG_STATUS);
   4032 		if ((reg & STATUS_PHYRA) != 0)
   4033 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4034 		break;
   4035 	default:
   4036 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4037 		    __func__);
   4038 		break;
   4039 	}
   4040 }
   4041 
   4042 int
   4043 wm_phy_post_reset(struct wm_softc *sc)
   4044 {
   4045 	device_t dev = sc->sc_dev;
   4046 	uint16_t reg;
   4047 	int rv = 0;
   4048 
   4049 	/* This function is only for ICH8 and newer. */
   4050 	if (sc->sc_type < WM_T_ICH8)
   4051 		return 0;
   4052 
   4053 	if (wm_phy_resetisblocked(sc)) {
   4054 		/* XXX */
   4055 		device_printf(dev, "PHY is blocked\n");
   4056 		return -1;
   4057 	}
   4058 
   4059 	/* Allow time for h/w to get to quiescent state after reset */
   4060 	delay(10*1000);
   4061 
   4062 	/* Perform any necessary post-reset workarounds */
   4063 	if (sc->sc_type == WM_T_PCH)
   4064 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4065 	else if (sc->sc_type == WM_T_PCH2)
   4066 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4067 	if (rv != 0)
   4068 		return rv;
   4069 
   4070 	/* Clear the host wakeup bit after lcd reset */
   4071 	if (sc->sc_type >= WM_T_PCH) {
   4072 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4073 		reg &= ~BM_WUC_HOST_WU_BIT;
   4074 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4075 	}
   4076 
   4077 	/* Configure the LCD with the extended configuration region in NVM */
   4078 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4079 		return rv;
   4080 
   4081 	/* Configure the LCD with the OEM bits in NVM */
   4082 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4083 
   4084 	if (sc->sc_type == WM_T_PCH2) {
   4085 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4086 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4087 			delay(10 * 1000);
   4088 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4089 		}
   4090 		/* Set EEE LPI Update Timer to 200usec */
   4091 		rv = sc->phy.acquire(sc);
   4092 		if (rv)
   4093 			return rv;
   4094 		rv = wm_write_emi_reg_locked(dev,
   4095 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4096 		sc->phy.release(sc);
   4097 	}
   4098 
   4099 	return rv;
   4100 }
   4101 
   4102 /* Only for PCH and newer */
   4103 static int
   4104 wm_write_smbus_addr(struct wm_softc *sc)
   4105 {
   4106 	uint32_t strap, freq;
   4107 	uint16_t phy_data;
   4108 	int rv;
   4109 
   4110 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4111 		device_xname(sc->sc_dev), __func__));
   4112 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4113 
   4114 	strap = CSR_READ(sc, WMREG_STRAP);
   4115 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4116 
   4117 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4118 	if (rv != 0)
   4119 		return -1;
   4120 
   4121 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4122 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4123 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4124 
   4125 	if (sc->sc_phytype == WMPHY_I217) {
   4126 		/* Restore SMBus frequency */
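         		/*
         		 * A strap value of zero means the frequency is unknown;
         		 * otherwise the two low bits of (freq - 1) provide the
         		 * low/high frequency selection bits.
         		 */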
    4127 		if (freq--) {
   4128 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4129 			    | HV_SMB_ADDR_FREQ_HIGH);
   4130 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4131 			    HV_SMB_ADDR_FREQ_LOW);
   4132 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4133 			    HV_SMB_ADDR_FREQ_HIGH);
   4134 		} else
   4135 			DPRINTF(WM_DEBUG_INIT,
   4136 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4137 				device_xname(sc->sc_dev), __func__));
   4138 	}
   4139 
   4140 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4141 	    phy_data);
   4142 }
   4143 
   4144 static int
   4145 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4146 {
   4147 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4148 	uint16_t phy_page = 0;
   4149 	int rv = 0;
   4150 
   4151 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4152 		device_xname(sc->sc_dev), __func__));
   4153 
   4154 	switch (sc->sc_type) {
   4155 	case WM_T_ICH8:
   4156 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4157 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4158 			return 0;
   4159 
   4160 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4161 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4162 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4163 			break;
   4164 		}
   4165 		/* FALLTHROUGH */
   4166 	case WM_T_PCH:
   4167 	case WM_T_PCH2:
   4168 	case WM_T_PCH_LPT:
   4169 	case WM_T_PCH_SPT:
   4170 	case WM_T_PCH_CNP:
   4171 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4172 		break;
   4173 	default:
   4174 		return 0;
   4175 	}
   4176 
   4177 	if ((rv = sc->phy.acquire(sc)) != 0)
   4178 		return rv;
   4179 
   4180 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4181 	if ((reg & sw_cfg_mask) == 0)
   4182 		goto release;
   4183 
   4184 	/*
   4185 	 * Make sure HW does not configure LCD from PHY extended configuration
   4186 	 * before SW configuration
   4187 	 */
   4188 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4189 	if ((sc->sc_type < WM_T_PCH2)
   4190 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4191 		goto release;
   4192 
   4193 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4194 		device_xname(sc->sc_dev), __func__));
    4195 	/* The pointer is in DWORDs; convert it to a word address. */
   4196 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4197 
   4198 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4199 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4200 	if (cnf_size == 0)
   4201 		goto release;
   4202 
   4203 	if (((sc->sc_type == WM_T_PCH)
   4204 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4205 	    || (sc->sc_type > WM_T_PCH)) {
   4206 		/*
   4207 		 * HW configures the SMBus address and LEDs when the OEM and
   4208 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4209 		 * are cleared, SW will configure them instead.
   4210 		 */
   4211 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4212 			device_xname(sc->sc_dev), __func__));
   4213 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4214 			goto release;
   4215 
   4216 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4217 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4218 		    (uint16_t)reg);
   4219 		if (rv != 0)
   4220 			goto release;
   4221 	}
   4222 
   4223 	/* Configure LCD from extended configuration region. */
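         	/*
         	 * Each entry in the region is a pair of NVM words: the data
         	 * word followed by the register address. A write to
         	 * MII_IGPHY_PAGE_SELECT switches the PHY page used for the
         	 * entries that follow it.
         	 */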
   4224 	for (i = 0; i < cnf_size; i++) {
   4225 		uint16_t reg_data, reg_addr;
   4226 
   4227 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4228 			goto release;
   4229 
    4230 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   4231 			goto release;
   4232 
   4233 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4234 			phy_page = reg_data;
   4235 
   4236 		reg_addr &= IGPHY_MAXREGADDR;
   4237 		reg_addr |= phy_page;
   4238 
   4239 		KASSERT(sc->phy.writereg_locked != NULL);
   4240 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4241 		    reg_data);
   4242 	}
   4243 
   4244 release:
   4245 	sc->phy.release(sc);
   4246 	return rv;
   4247 }
   4248 
   4249 /*
   4250  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4251  *  @sc:       pointer to the HW structure
   4252  *  @d0_state: boolean if entering d0 or d3 device state
   4253  *
   4254  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4255  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4256  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4257  */
   4258 int
   4259 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4260 {
   4261 	uint32_t mac_reg;
   4262 	uint16_t oem_reg;
   4263 	int rv;
   4264 
   4265 	if (sc->sc_type < WM_T_PCH)
   4266 		return 0;
   4267 
   4268 	rv = sc->phy.acquire(sc);
   4269 	if (rv != 0)
   4270 		return rv;
   4271 
   4272 	if (sc->sc_type == WM_T_PCH) {
   4273 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4274 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4275 			goto release;
   4276 	}
   4277 
   4278 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4279 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4280 		goto release;
   4281 
   4282 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4283 
   4284 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4285 	if (rv != 0)
   4286 		goto release;
   4287 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4288 
   4289 	if (d0_state) {
   4290 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4291 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4292 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4293 			oem_reg |= HV_OEM_BITS_LPLU;
   4294 	} else {
   4295 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4296 		    != 0)
   4297 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4298 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4299 		    != 0)
   4300 			oem_reg |= HV_OEM_BITS_LPLU;
   4301 	}
   4302 
   4303 	/* Set Restart auto-neg to activate the bits */
   4304 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4305 	    && (wm_phy_resetisblocked(sc) == false))
   4306 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4307 
   4308 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4309 
   4310 release:
   4311 	sc->phy.release(sc);
   4312 
   4313 	return rv;
   4314 }
   4315 
   4316 /* Init hardware bits */
   4317 void
   4318 wm_initialize_hardware_bits(struct wm_softc *sc)
   4319 {
   4320 	uint32_t tarc0, tarc1, reg;
   4321 
   4322 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4323 		device_xname(sc->sc_dev), __func__));
   4324 
   4325 	/* For 82571 variant, 80003 and ICHs */
   4326 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4327 	    || (sc->sc_type >= WM_T_80003)) {
   4328 
   4329 		/* Transmit Descriptor Control 0 */
   4330 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4331 		reg |= TXDCTL_COUNT_DESC;
   4332 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4333 
   4334 		/* Transmit Descriptor Control 1 */
   4335 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4336 		reg |= TXDCTL_COUNT_DESC;
   4337 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4338 
   4339 		/* TARC0 */
   4340 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4341 		switch (sc->sc_type) {
   4342 		case WM_T_82571:
   4343 		case WM_T_82572:
   4344 		case WM_T_82573:
   4345 		case WM_T_82574:
   4346 		case WM_T_82583:
   4347 		case WM_T_80003:
   4348 			/* Clear bits 30..27 */
   4349 			tarc0 &= ~__BITS(30, 27);
   4350 			break;
   4351 		default:
   4352 			break;
   4353 		}
   4354 
   4355 		switch (sc->sc_type) {
   4356 		case WM_T_82571:
   4357 		case WM_T_82572:
   4358 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4359 
   4360 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4361 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4362 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4363 			/* 8257[12] Errata No.7 */
    4364 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4365 
   4366 			/* TARC1 bit 28 */
   4367 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4368 				tarc1 &= ~__BIT(28);
   4369 			else
   4370 				tarc1 |= __BIT(28);
   4371 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4372 
   4373 			/*
   4374 			 * 8257[12] Errata No.13
    4375 			 * Disable Dynamic Clock Gating.
   4376 			 */
   4377 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4378 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4379 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4380 			break;
   4381 		case WM_T_82573:
   4382 		case WM_T_82574:
   4383 		case WM_T_82583:
   4384 			if ((sc->sc_type == WM_T_82574)
   4385 			    || (sc->sc_type == WM_T_82583))
   4386 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4387 
   4388 			/* Extended Device Control */
   4389 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4390 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4391 			reg |= __BIT(22);	/* Set bit 22 */
   4392 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4393 
   4394 			/* Device Control */
   4395 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4396 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4397 
   4398 			/* PCIe Control Register */
   4399 			/*
   4400 			 * 82573 Errata (unknown).
   4401 			 *
   4402 			 * 82574 Errata 25 and 82583 Errata 12
   4403 			 * "Dropped Rx Packets":
    4404 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4405 			 */
   4406 			reg = CSR_READ(sc, WMREG_GCR);
   4407 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4408 			CSR_WRITE(sc, WMREG_GCR, reg);
   4409 
   4410 			if ((sc->sc_type == WM_T_82574)
   4411 			    || (sc->sc_type == WM_T_82583)) {
   4412 				/*
   4413 				 * Document says this bit must be set for
   4414 				 * proper operation.
   4415 				 */
   4416 				reg = CSR_READ(sc, WMREG_GCR);
   4417 				reg |= __BIT(22);
   4418 				CSR_WRITE(sc, WMREG_GCR, reg);
   4419 
   4420 				/*
    4421 				 * Apply a workaround for the hardware
    4422 				 * errata documented in the errata docs.
    4423 				 * It fixes an issue where error-prone or
    4424 				 * unreliable PCIe completions occur,
    4425 				 * particularly with ASPM enabled. Without
    4426 				 * the fix, the issue can cause Tx timeouts.
   4427 				 */
   4428 				reg = CSR_READ(sc, WMREG_GCR2);
   4429 				reg |= __BIT(0);
   4430 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4431 			}
   4432 			break;
   4433 		case WM_T_80003:
   4434 			/* TARC0 */
   4435 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4436 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4437 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4438 
   4439 			/* TARC1 bit 28 */
   4440 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4441 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4442 				tarc1 &= ~__BIT(28);
   4443 			else
   4444 				tarc1 |= __BIT(28);
   4445 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4446 			break;
   4447 		case WM_T_ICH8:
   4448 		case WM_T_ICH9:
   4449 		case WM_T_ICH10:
   4450 		case WM_T_PCH:
   4451 		case WM_T_PCH2:
   4452 		case WM_T_PCH_LPT:
   4453 		case WM_T_PCH_SPT:
   4454 		case WM_T_PCH_CNP:
   4455 			/* TARC0 */
   4456 			if (sc->sc_type == WM_T_ICH8) {
   4457 				/* Set TARC0 bits 29 and 28 */
   4458 				tarc0 |= __BITS(29, 28);
   4459 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4460 				tarc0 |= __BIT(29);
   4461 				/*
   4462 				 *  Drop bit 28. From Linux.
   4463 				 * See I218/I219 spec update
   4464 				 * "5. Buffer Overrun While the I219 is
   4465 				 * Processing DMA Transactions"
   4466 				 */
   4467 				tarc0 &= ~__BIT(28);
   4468 			}
   4469 			/* Set TARC0 bits 23,24,26,27 */
   4470 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4471 
   4472 			/* CTRL_EXT */
   4473 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4474 			reg |= __BIT(22);	/* Set bit 22 */
   4475 			/*
   4476 			 * Enable PHY low-power state when MAC is at D3
   4477 			 * w/o WoL
   4478 			 */
   4479 			if (sc->sc_type >= WM_T_PCH)
   4480 				reg |= CTRL_EXT_PHYPDEN;
   4481 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4482 
   4483 			/* TARC1 */
   4484 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4485 			/* bit 28 */
   4486 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4487 				tarc1 &= ~__BIT(28);
   4488 			else
   4489 				tarc1 |= __BIT(28);
   4490 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4491 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4492 
   4493 			/* Device Status */
   4494 			if (sc->sc_type == WM_T_ICH8) {
   4495 				reg = CSR_READ(sc, WMREG_STATUS);
   4496 				reg &= ~__BIT(31);
   4497 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4498 
   4499 			}
   4500 
   4501 			/* IOSFPC */
   4502 			if (sc->sc_type == WM_T_PCH_SPT) {
   4503 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4504 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4505 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4506 			}
   4507 			/*
    4508 			 * To work around a descriptor data corruption issue
    4509 			 * during NFS v2 UDP traffic, simply disable the NFS
    4510 			 * filtering capability.
   4511 			 */
   4512 			reg = CSR_READ(sc, WMREG_RFCTL);
   4513 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4514 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4515 			break;
   4516 		default:
   4517 			break;
   4518 		}
   4519 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4520 
   4521 		switch (sc->sc_type) {
   4522 		/*
   4523 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4524 		 * Avoid RSS Hash Value bug.
   4525 		 */
   4526 		case WM_T_82571:
   4527 		case WM_T_82572:
   4528 		case WM_T_82573:
   4529 		case WM_T_80003:
   4530 		case WM_T_ICH8:
   4531 			reg = CSR_READ(sc, WMREG_RFCTL);
   4532 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4533 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4534 			break;
   4535 		case WM_T_82574:
    4536 			/* Use the extended Rx descriptor. */
   4537 			reg = CSR_READ(sc, WMREG_RFCTL);
   4538 			reg |= WMREG_RFCTL_EXSTEN;
   4539 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4540 			break;
   4541 		default:
   4542 			break;
   4543 		}
   4544 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4545 		/*
   4546 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4547 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4548 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4549 		 * Correctly by the Device"
   4550 		 *
   4551 		 * I354(C2000) Errata AVR53:
   4552 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4553 		 * Hang"
   4554 		 */
   4555 		reg = CSR_READ(sc, WMREG_RFCTL);
   4556 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4557 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4558 	}
   4559 }
   4560 
   4561 static uint32_t
   4562 wm_rxpbs_adjust_82580(uint32_t val)
   4563 {
   4564 	uint32_t rv = 0;
   4565 
   4566 	if (val < __arraycount(wm_82580_rxpbs_table))
   4567 		rv = wm_82580_rxpbs_table[val];
   4568 
   4569 	return rv;
   4570 }
   4571 
   4572 /*
   4573  * wm_reset_phy:
   4574  *
    4575  *	Generic PHY reset function.
   4576  *	Same as e1000_phy_hw_reset_generic()
   4577  */
   4578 static int
   4579 wm_reset_phy(struct wm_softc *sc)
   4580 {
   4581 	uint32_t reg;
   4582 
   4583 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4584 		device_xname(sc->sc_dev), __func__));
   4585 	if (wm_phy_resetisblocked(sc))
   4586 		return -1;
   4587 
   4588 	sc->phy.acquire(sc);
   4589 
   4590 	reg = CSR_READ(sc, WMREG_CTRL);
   4591 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4592 	CSR_WRITE_FLUSH(sc);
   4593 
   4594 	delay(sc->phy.reset_delay_us);
   4595 
   4596 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4597 	CSR_WRITE_FLUSH(sc);
   4598 
   4599 	delay(150);
   4600 
   4601 	sc->phy.release(sc);
   4602 
   4603 	wm_get_cfg_done(sc);
   4604 	wm_phy_post_reset(sc);
   4605 
   4606 	return 0;
   4607 }
   4608 
   4609 /*
    4610  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4611  * so it is enough to check sc->sc_queue[0] only.
   4612  */
   4613 static void
   4614 wm_flush_desc_rings(struct wm_softc *sc)
   4615 {
   4616 	pcireg_t preg;
   4617 	uint32_t reg;
   4618 	struct wm_txqueue *txq;
   4619 	wiseman_txdesc_t *txd;
   4620 	int nexttx;
   4621 	uint32_t rctl;
   4622 
   4623 	/* First, disable MULR fix in FEXTNVM11 */
   4624 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4625 	reg |= FEXTNVM11_DIS_MULRFIX;
   4626 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4627 
   4628 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4629 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4630 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4631 		return;
   4632 
   4633 	/* TX */
   4634 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4635 	    preg, reg);
   4636 	reg = CSR_READ(sc, WMREG_TCTL);
   4637 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4638 
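         	/*
         	 * Queue one dummy 512-byte descriptor so the hardware can
         	 * drain the stuck TX ring. (Descriptive note: this mirrors
         	 * the SPT flush workaround used by other drivers; the dummy
         	 * data is never meaningfully transmitted.)
         	 */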
   4639 	txq = &sc->sc_queue[0].wmq_txq;
   4640 	nexttx = txq->txq_next;
   4641 	txd = &txq->txq_descs[nexttx];
   4642 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4643 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4644 	txd->wtx_fields.wtxu_status = 0;
   4645 	txd->wtx_fields.wtxu_options = 0;
   4646 	txd->wtx_fields.wtxu_vlan = 0;
   4647 
   4648 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4649 	    BUS_SPACE_BARRIER_WRITE);
   4650 
   4651 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4652 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4653 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4654 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4655 	delay(250);
   4656 
   4657 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4658 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4659 		return;
   4660 
   4661 	/* RX */
   4662 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4663 	rctl = CSR_READ(sc, WMREG_RCTL);
   4664 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4665 	CSR_WRITE_FLUSH(sc);
   4666 	delay(150);
   4667 
   4668 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4669 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4670 	reg &= 0xffffc000;
   4671 	/*
   4672 	 * Update thresholds: prefetch threshold to 31, host threshold
   4673 	 * to 1 and make sure the granularity is "descriptors" and not
   4674 	 * "cache lines"
   4675 	 */
   4676 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
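         	/*
         	 * I.e. PTHRESH (bits 5:0) = 0x1f, HTHRESH (bits 13:8) = 1,
         	 * and RXDCTL_GRAN selects descriptor granularity.
         	 */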
   4677 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4678 
   4679 	/* Momentarily enable the RX ring for the changes to take effect */
   4680 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4681 	CSR_WRITE_FLUSH(sc);
   4682 	delay(150);
   4683 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4684 }
   4685 
   4686 /*
   4687  * wm_reset:
   4688  *
   4689  *	Reset the i82542 chip.
   4690  */
   4691 static void
   4692 wm_reset(struct wm_softc *sc)
   4693 {
   4694 	int phy_reset = 0;
   4695 	int i, error = 0;
   4696 	uint32_t reg;
   4697 	uint16_t kmreg;
   4698 	int rv;
   4699 
   4700 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4701 		device_xname(sc->sc_dev), __func__));
   4702 	KASSERT(sc->sc_type != 0);
   4703 
   4704 	/*
   4705 	 * Allocate on-chip memory according to the MTU size.
   4706 	 * The Packet Buffer Allocation register must be written
   4707 	 * before the chip is reset.
   4708 	 */
   4709 	switch (sc->sc_type) {
   4710 	case WM_T_82547:
   4711 	case WM_T_82547_2:
   4712 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4713 		    PBA_22K : PBA_30K;
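         		/*
         		 * The TX FIFO below starts right after the RX
         		 * allocation and gets the remainder of the 40KB
         		 * packet buffer (PBA_40K - sc_pba).
         		 */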
   4714 		for (i = 0; i < sc->sc_nqueues; i++) {
   4715 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4716 			txq->txq_fifo_head = 0;
   4717 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4718 			txq->txq_fifo_size =
   4719 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4720 			txq->txq_fifo_stall = 0;
   4721 		}
   4722 		break;
   4723 	case WM_T_82571:
   4724 	case WM_T_82572:
   4725 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4726 	case WM_T_80003:
   4727 		sc->sc_pba = PBA_32K;
   4728 		break;
   4729 	case WM_T_82573:
   4730 		sc->sc_pba = PBA_12K;
   4731 		break;
   4732 	case WM_T_82574:
   4733 	case WM_T_82583:
   4734 		sc->sc_pba = PBA_20K;
   4735 		break;
   4736 	case WM_T_82576:
   4737 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4738 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4739 		break;
   4740 	case WM_T_82580:
   4741 	case WM_T_I350:
   4742 	case WM_T_I354:
   4743 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4744 		break;
   4745 	case WM_T_I210:
   4746 	case WM_T_I211:
   4747 		sc->sc_pba = PBA_34K;
   4748 		break;
   4749 	case WM_T_ICH8:
   4750 		/* Workaround for a bit corruption issue in FIFO memory */
   4751 		sc->sc_pba = PBA_8K;
   4752 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4753 		break;
   4754 	case WM_T_ICH9:
   4755 	case WM_T_ICH10:
   4756 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4757 		    PBA_14K : PBA_10K;
   4758 		break;
   4759 	case WM_T_PCH:
   4760 	case WM_T_PCH2:	/* XXX 14K? */
   4761 	case WM_T_PCH_LPT:
   4762 	case WM_T_PCH_SPT:
   4763 	case WM_T_PCH_CNP:
   4764 		sc->sc_pba = PBA_26K;
   4765 		break;
   4766 	default:
   4767 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4768 		    PBA_40K : PBA_48K;
   4769 		break;
   4770 	}
   4771 	/*
    4772 	 * Only old or non-multiqueue devices have the PBA register.
   4773 	 * XXX Need special handling for 82575.
   4774 	 */
   4775 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4776 	    || (sc->sc_type == WM_T_82575))
   4777 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4778 
   4779 	/* Prevent the PCI-E bus from sticking */
   4780 	if (sc->sc_flags & WM_F_PCIE) {
   4781 		int timeout = 800;
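         		/* Up to 800 * 100us = 80ms total wait below. */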
   4782 
   4783 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4784 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4785 
   4786 		while (timeout--) {
   4787 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4788 			    == 0)
   4789 				break;
   4790 			delay(100);
   4791 		}
   4792 		if (timeout == 0)
   4793 			device_printf(sc->sc_dev,
   4794 			    "failed to disable busmastering\n");
   4795 	}
   4796 
   4797 	/* Set the completion timeout for interface */
   4798 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4799 	    || (sc->sc_type == WM_T_82580)
   4800 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4801 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4802 		wm_set_pcie_completion_timeout(sc);
   4803 
   4804 	/* Clear interrupt */
   4805 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4806 	if (wm_is_using_msix(sc)) {
   4807 		if (sc->sc_type != WM_T_82574) {
   4808 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4809 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4810 		} else
   4811 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4812 	}
   4813 
   4814 	/* Stop the transmit and receive processes. */
   4815 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4816 	sc->sc_rctl &= ~RCTL_EN;
   4817 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4818 	CSR_WRITE_FLUSH(sc);
   4819 
   4820 	/* XXX set_tbi_sbp_82543() */
   4821 
   4822 	delay(10*1000);
   4823 
   4824 	/* Must acquire the MDIO ownership before MAC reset */
   4825 	switch (sc->sc_type) {
   4826 	case WM_T_82573:
   4827 	case WM_T_82574:
   4828 	case WM_T_82583:
   4829 		error = wm_get_hw_semaphore_82573(sc);
   4830 		break;
   4831 	default:
   4832 		break;
   4833 	}
   4834 
   4835 	/*
   4836 	 * 82541 Errata 29? & 82547 Errata 28?
   4837 	 * See also the description about PHY_RST bit in CTRL register
   4838 	 * in 8254x_GBe_SDM.pdf.
   4839 	 */
   4840 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4841 		CSR_WRITE(sc, WMREG_CTRL,
   4842 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4843 		CSR_WRITE_FLUSH(sc);
   4844 		delay(5000);
   4845 	}
   4846 
   4847 	switch (sc->sc_type) {
   4848 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4849 	case WM_T_82541:
   4850 	case WM_T_82541_2:
   4851 	case WM_T_82547:
   4852 	case WM_T_82547_2:
   4853 		/*
   4854 		 * On some chipsets, a reset through a memory-mapped write
   4855 		 * cycle can cause the chip to reset before completing the
    4856 		 * write cycle. This causes a major headache that can be avoided
   4857 		 * by issuing the reset via indirect register writes through
   4858 		 * I/O space.
   4859 		 *
   4860 		 * So, if we successfully mapped the I/O BAR at attach time,
   4861 		 * use that. Otherwise, try our luck with a memory-mapped
   4862 		 * reset.
   4863 		 */
   4864 		if (sc->sc_flags & WM_F_IOH_VALID)
   4865 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4866 		else
   4867 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4868 		break;
   4869 	case WM_T_82545_3:
   4870 	case WM_T_82546_3:
   4871 		/* Use the shadow control register on these chips. */
   4872 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4873 		break;
   4874 	case WM_T_80003:
   4875 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4876 		sc->phy.acquire(sc);
   4877 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4878 		sc->phy.release(sc);
   4879 		break;
   4880 	case WM_T_ICH8:
   4881 	case WM_T_ICH9:
   4882 	case WM_T_ICH10:
   4883 	case WM_T_PCH:
   4884 	case WM_T_PCH2:
   4885 	case WM_T_PCH_LPT:
   4886 	case WM_T_PCH_SPT:
   4887 	case WM_T_PCH_CNP:
   4888 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4889 		if (wm_phy_resetisblocked(sc) == false) {
   4890 			/*
   4891 			 * Gate automatic PHY configuration by hardware on
   4892 			 * non-managed 82579
   4893 			 */
   4894 			if ((sc->sc_type == WM_T_PCH2)
   4895 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4896 				== 0))
   4897 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4898 
   4899 			reg |= CTRL_PHY_RESET;
   4900 			phy_reset = 1;
   4901 		} else
   4902 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4903 		sc->phy.acquire(sc);
   4904 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4905 		/* Don't insert a completion barrier during reset */
   4906 		delay(20*1000);
   4907 		mutex_exit(sc->sc_ich_phymtx);
   4908 		break;
   4909 	case WM_T_82580:
   4910 	case WM_T_I350:
   4911 	case WM_T_I354:
   4912 	case WM_T_I210:
   4913 	case WM_T_I211:
   4914 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4915 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4916 			CSR_WRITE_FLUSH(sc);
   4917 		delay(5000);
   4918 		break;
   4919 	case WM_T_82542_2_0:
   4920 	case WM_T_82542_2_1:
   4921 	case WM_T_82543:
   4922 	case WM_T_82540:
   4923 	case WM_T_82545:
   4924 	case WM_T_82546:
   4925 	case WM_T_82571:
   4926 	case WM_T_82572:
   4927 	case WM_T_82573:
   4928 	case WM_T_82574:
   4929 	case WM_T_82575:
   4930 	case WM_T_82576:
   4931 	case WM_T_82583:
   4932 	default:
   4933 		/* Everything else can safely use the documented method. */
   4934 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4935 		break;
   4936 	}
   4937 
   4938 	/* Must release the MDIO ownership after MAC reset */
   4939 	switch (sc->sc_type) {
   4940 	case WM_T_82573:
   4941 	case WM_T_82574:
   4942 	case WM_T_82583:
   4943 		if (error == 0)
   4944 			wm_put_hw_semaphore_82573(sc);
   4945 		break;
   4946 	default:
   4947 		break;
   4948 	}
   4949 
   4950 	/* Set Phy Config Counter to 50msec */
   4951 	if (sc->sc_type == WM_T_PCH2) {
   4952 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4953 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4954 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4955 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4956 	}
   4957 
   4958 	if (phy_reset != 0)
   4959 		wm_get_cfg_done(sc);
   4960 
   4961 	/* Reload EEPROM */
   4962 	switch (sc->sc_type) {
   4963 	case WM_T_82542_2_0:
   4964 	case WM_T_82542_2_1:
   4965 	case WM_T_82543:
   4966 	case WM_T_82544:
   4967 		delay(10);
   4968 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4969 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4970 		CSR_WRITE_FLUSH(sc);
   4971 		delay(2000);
   4972 		break;
   4973 	case WM_T_82540:
   4974 	case WM_T_82545:
   4975 	case WM_T_82545_3:
   4976 	case WM_T_82546:
   4977 	case WM_T_82546_3:
   4978 		delay(5*1000);
   4979 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4980 		break;
   4981 	case WM_T_82541:
   4982 	case WM_T_82541_2:
   4983 	case WM_T_82547:
   4984 	case WM_T_82547_2:
   4985 		delay(20000);
   4986 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4987 		break;
   4988 	case WM_T_82571:
   4989 	case WM_T_82572:
   4990 	case WM_T_82573:
   4991 	case WM_T_82574:
   4992 	case WM_T_82583:
   4993 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4994 			delay(10);
   4995 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4996 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4997 			CSR_WRITE_FLUSH(sc);
   4998 		}
   4999 		/* check EECD_EE_AUTORD */
   5000 		wm_get_auto_rd_done(sc);
   5001 		/*
    5002 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   5003 		 * is set.
   5004 		 */
   5005 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5006 		    || (sc->sc_type == WM_T_82583))
   5007 			delay(25*1000);
   5008 		break;
   5009 	case WM_T_82575:
   5010 	case WM_T_82576:
   5011 	case WM_T_82580:
   5012 	case WM_T_I350:
   5013 	case WM_T_I354:
   5014 	case WM_T_I210:
   5015 	case WM_T_I211:
   5016 	case WM_T_80003:
   5017 		/* check EECD_EE_AUTORD */
   5018 		wm_get_auto_rd_done(sc);
   5019 		break;
   5020 	case WM_T_ICH8:
   5021 	case WM_T_ICH9:
   5022 	case WM_T_ICH10:
   5023 	case WM_T_PCH:
   5024 	case WM_T_PCH2:
   5025 	case WM_T_PCH_LPT:
   5026 	case WM_T_PCH_SPT:
   5027 	case WM_T_PCH_CNP:
   5028 		break;
   5029 	default:
   5030 		panic("%s: unknown type\n", __func__);
   5031 	}
   5032 
   5033 	/* Check whether EEPROM is present or not */
   5034 	switch (sc->sc_type) {
   5035 	case WM_T_82575:
   5036 	case WM_T_82576:
   5037 	case WM_T_82580:
   5038 	case WM_T_I350:
   5039 	case WM_T_I354:
   5040 	case WM_T_ICH8:
   5041 	case WM_T_ICH9:
   5042 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5043 			/* Not found */
   5044 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5045 			if (sc->sc_type == WM_T_82575)
   5046 				wm_reset_init_script_82575(sc);
   5047 		}
   5048 		break;
   5049 	default:
   5050 		break;
   5051 	}
   5052 
   5053 	if (phy_reset != 0)
   5054 		wm_phy_post_reset(sc);
   5055 
   5056 	if ((sc->sc_type == WM_T_82580)
   5057 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5058 		/* Clear global device reset status bit */
   5059 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5060 	}
   5061 
   5062 	/* Clear any pending interrupt events. */
   5063 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5064 	reg = CSR_READ(sc, WMREG_ICR);
   5065 	if (wm_is_using_msix(sc)) {
   5066 		if (sc->sc_type != WM_T_82574) {
   5067 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5068 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5069 		} else
   5070 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5071 	}
   5072 
   5073 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5074 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5075 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5076 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5077 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5078 		reg |= KABGTXD_BGSQLBIAS;
   5079 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5080 	}
   5081 
   5082 	/* Reload sc_ctrl */
   5083 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5084 
   5085 	wm_set_eee(sc);
   5086 
   5087 	/*
   5088 	 * For PCH, this write will make sure that any noise will be detected
   5089 	 * as a CRC error and be dropped rather than show up as a bad packet
   5090 	 * to the DMA engine
   5091 	 */
   5092 	if (sc->sc_type == WM_T_PCH)
   5093 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5094 
   5095 	if (sc->sc_type >= WM_T_82544)
   5096 		CSR_WRITE(sc, WMREG_WUC, 0);
   5097 
   5098 	if (sc->sc_type < WM_T_82575)
   5099 		wm_disable_aspm(sc); /* Workaround for some chips */
   5100 
   5101 	wm_reset_mdicnfg_82580(sc);
   5102 
   5103 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5104 		wm_pll_workaround_i210(sc);
   5105 
   5106 	if (sc->sc_type == WM_T_80003) {
   5107 		/* Default to TRUE to enable the MDIC W/A */
   5108 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5109 
   5110 		rv = wm_kmrn_readreg(sc,
   5111 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5112 		if (rv == 0) {
   5113 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5114 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5115 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5116 			else
   5117 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5118 		}
   5119 	}
   5120 }
   5121 
   5122 /*
   5123  * wm_add_rxbuf:
   5124  *
    5125  *	Add a receive buffer to the indicated descriptor.
   5126  */
   5127 static int
   5128 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5129 {
   5130 	struct wm_softc *sc = rxq->rxq_sc;
   5131 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5132 	struct mbuf *m;
   5133 	int error;
   5134 
   5135 	KASSERT(mutex_owned(rxq->rxq_lock));
   5136 
   5137 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5138 	if (m == NULL)
   5139 		return ENOBUFS;
   5140 
   5141 	MCLGET(m, M_DONTWAIT);
   5142 	if ((m->m_flags & M_EXT) == 0) {
   5143 		m_freem(m);
   5144 		return ENOBUFS;
   5145 	}
   5146 
   5147 	if (rxs->rxs_mbuf != NULL)
   5148 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5149 
   5150 	rxs->rxs_mbuf = m;
   5151 
   5152 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5153 	/*
   5154 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5155 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5156 	 */
   5157 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5158 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5159 	if (error) {
   5160 		/* XXX XXX XXX */
   5161 		aprint_error_dev(sc->sc_dev,
   5162 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5163 		panic("wm_add_rxbuf");
   5164 	}
   5165 
   5166 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5167 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5168 
   5169 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5170 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5171 			wm_init_rxdesc(rxq, idx);
   5172 	} else
   5173 		wm_init_rxdesc(rxq, idx);
   5174 
   5175 	return 0;
   5176 }
   5177 
   5178 /*
   5179  * wm_rxdrain:
   5180  *
   5181  *	Drain the receive queue.
   5182  */
   5183 static void
   5184 wm_rxdrain(struct wm_rxqueue *rxq)
   5185 {
   5186 	struct wm_softc *sc = rxq->rxq_sc;
   5187 	struct wm_rxsoft *rxs;
   5188 	int i;
   5189 
   5190 	KASSERT(mutex_owned(rxq->rxq_lock));
   5191 
   5192 	for (i = 0; i < WM_NRXDESC; i++) {
   5193 		rxs = &rxq->rxq_soft[i];
   5194 		if (rxs->rxs_mbuf != NULL) {
   5195 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5196 			m_freem(rxs->rxs_mbuf);
   5197 			rxs->rxs_mbuf = NULL;
   5198 		}
   5199 	}
   5200 }
   5201 
   5202 /*
   5203  * Setup registers for RSS.
   5204  *
    5205  * XXX VMDq is not supported yet.
   5206  */
   5207 static void
   5208 wm_init_rss(struct wm_softc *sc)
   5209 {
   5210 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5211 	int i;
   5212 
   5213 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5214 
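         	/*
         	 * Fill the redirection table round-robin: entry i steers its
         	 * hash bucket to queue (i % sc_nqueues).
         	 */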
   5215 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5216 		unsigned int qid, reta_ent;
   5217 
   5218 		qid  = i % sc->sc_nqueues;
   5219 		switch (sc->sc_type) {
   5220 		case WM_T_82574:
   5221 			reta_ent = __SHIFTIN(qid,
   5222 			    RETA_ENT_QINDEX_MASK_82574);
   5223 			break;
   5224 		case WM_T_82575:
   5225 			reta_ent = __SHIFTIN(qid,
   5226 			    RETA_ENT_QINDEX1_MASK_82575);
   5227 			break;
   5228 		default:
   5229 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5230 			break;
   5231 		}
   5232 
   5233 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5234 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5235 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5236 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5237 	}
   5238 
   5239 	rss_getkey((uint8_t *)rss_key);
   5240 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5241 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5242 
   5243 	if (sc->sc_type == WM_T_82574)
   5244 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5245 	else
   5246 		mrqc = MRQC_ENABLE_RSS_MQ;
   5247 
   5248 	/*
   5249 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5250 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5251 	 */
   5252 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5253 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5254 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5255 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5256 
   5257 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5258 }
   5259 
   5260 /*
    5261  * Adjust the TX and RX queue numbers which the system actually uses.
    5262  *
    5263  * The numbers are affected by the parameters below:
    5264  *     - The number of hardware queues
   5265  *     - The number of MSI-X vectors (= "nvectors" argument)
   5266  *     - ncpu
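          *
          * For example (illustrative): an 82576 (16 hardware queues)
          * given 5 MSI-X vectors on an 8-CPU machine ends up with
          * min(16, 5 - 1, 8) = 4 queues.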
   5267  */
   5268 static void
   5269 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5270 {
   5271 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5272 
   5273 	if (nvectors < 2) {
   5274 		sc->sc_nqueues = 1;
   5275 		return;
   5276 	}
   5277 
   5278 	switch (sc->sc_type) {
   5279 	case WM_T_82572:
   5280 		hw_ntxqueues = 2;
   5281 		hw_nrxqueues = 2;
   5282 		break;
   5283 	case WM_T_82574:
   5284 		hw_ntxqueues = 2;
   5285 		hw_nrxqueues = 2;
   5286 		break;
   5287 	case WM_T_82575:
   5288 		hw_ntxqueues = 4;
   5289 		hw_nrxqueues = 4;
   5290 		break;
   5291 	case WM_T_82576:
   5292 		hw_ntxqueues = 16;
   5293 		hw_nrxqueues = 16;
   5294 		break;
   5295 	case WM_T_82580:
   5296 	case WM_T_I350:
   5297 	case WM_T_I354:
   5298 		hw_ntxqueues = 8;
   5299 		hw_nrxqueues = 8;
   5300 		break;
   5301 	case WM_T_I210:
   5302 		hw_ntxqueues = 4;
   5303 		hw_nrxqueues = 4;
   5304 		break;
   5305 	case WM_T_I211:
   5306 		hw_ntxqueues = 2;
   5307 		hw_nrxqueues = 2;
   5308 		break;
   5309 		/*
    5310 		 * As the ethernet controllers below do not support MSI-X,
    5311 		 * this driver does not use multiqueue on them.
   5312 		 *     - WM_T_80003
   5313 		 *     - WM_T_ICH8
   5314 		 *     - WM_T_ICH9
   5315 		 *     - WM_T_ICH10
   5316 		 *     - WM_T_PCH
   5317 		 *     - WM_T_PCH2
   5318 		 *     - WM_T_PCH_LPT
   5319 		 */
   5320 	default:
   5321 		hw_ntxqueues = 1;
   5322 		hw_nrxqueues = 1;
   5323 		break;
   5324 	}
   5325 
   5326 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5327 
   5328 	/*
    5329 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    5330 	 * the number of queues actually used.
   5331 	 */
   5332 	if (nvectors < hw_nqueues + 1)
   5333 		sc->sc_nqueues = nvectors - 1;
   5334 	else
   5335 		sc->sc_nqueues = hw_nqueues;
   5336 
   5337 	/*
    5338 	 * As more queues than CPUs cannot improve scaling, we limit
    5339 	 * the number of queues actually used.
   5340 	 */
   5341 	if (ncpu < sc->sc_nqueues)
   5342 		sc->sc_nqueues = ncpu;
   5343 }
   5344 
   5345 static inline bool
   5346 wm_is_using_msix(struct wm_softc *sc)
   5347 {
   5348 
   5349 	return (sc->sc_nintrs > 1);
   5350 }
   5351 
   5352 static inline bool
   5353 wm_is_using_multiqueue(struct wm_softc *sc)
   5354 {
   5355 
   5356 	return (sc->sc_nqueues > 1);
   5357 }
   5358 
   5359 static int
   5360 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5361 {
   5362 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5363 	wmq->wmq_id = qidx;
   5364 	wmq->wmq_intr_idx = intr_idx;
   5365 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5366 #ifdef WM_MPSAFE
   5367 	    | SOFTINT_MPSAFE
   5368 #endif
   5369 	    , wm_handle_queue, wmq);
   5370 	if (wmq->wmq_si != NULL)
   5371 		return 0;
   5372 
   5373 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5374 	    wmq->wmq_id);
   5375 
   5376 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5377 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5378 	return ENOMEM;
   5379 }
   5380 
   5381 /*
    5382  * Both single-interrupt MSI and INTx can use this function.
   5383  */
   5384 static int
   5385 wm_setup_legacy(struct wm_softc *sc)
   5386 {
   5387 	pci_chipset_tag_t pc = sc->sc_pc;
   5388 	const char *intrstr = NULL;
   5389 	char intrbuf[PCI_INTRSTR_LEN];
   5390 	int error;
   5391 
   5392 	error = wm_alloc_txrx_queues(sc);
   5393 	if (error) {
   5394 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5395 		    error);
   5396 		return ENOMEM;
   5397 	}
   5398 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5399 	    sizeof(intrbuf));
   5400 #ifdef WM_MPSAFE
   5401 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5402 #endif
   5403 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5404 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5405 	if (sc->sc_ihs[0] == NULL) {
   5406 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5407 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5408 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5409 		return ENOMEM;
   5410 	}
   5411 
   5412 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5413 	sc->sc_nintrs = 1;
   5414 
   5415 	return wm_softint_establish(sc, 0, 0);
   5416 }
   5417 
   5418 static int
   5419 wm_setup_msix(struct wm_softc *sc)
   5420 {
   5421 	void *vih;
   5422 	kcpuset_t *affinity;
   5423 	int qidx, error, intr_idx, txrx_established;
   5424 	pci_chipset_tag_t pc = sc->sc_pc;
   5425 	const char *intrstr = NULL;
   5426 	char intrbuf[PCI_INTRSTR_LEN];
   5427 	char intr_xname[INTRDEVNAMEBUF];
   5428 
   5429 	if (sc->sc_nqueues < ncpu) {
   5430 		/*
   5431 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5432 		 * interrupts starts from CPU#1.
   5433 		 */
   5434 		sc->sc_affinity_offset = 1;
   5435 	} else {
   5436 		/*
    5437 		 * In this case, this device uses all CPUs, so we unify the
    5438 		 * affinity cpu_index with the MSI-X vector number for readability.
   5439 		 */
   5440 		sc->sc_affinity_offset = 0;
   5441 	}
   5442 
   5443 	error = wm_alloc_txrx_queues(sc);
   5444 	if (error) {
   5445 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5446 		    error);
   5447 		return ENOMEM;
   5448 	}
   5449 
   5450 	kcpuset_create(&affinity, false);
   5451 	intr_idx = 0;
   5452 
   5453 	/*
   5454 	 * TX and RX
   5455 	 */
   5456 	txrx_established = 0;
   5457 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5458 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5459 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
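         		/*
         		 * E.g. with sc_affinity_offset = 1 on a 4-CPU machine,
         		 * queue interrupts land on CPUs 1, 2, 3, 0, 1, ...
         		 */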
   5460 
   5461 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5462 		    sizeof(intrbuf));
   5463 #ifdef WM_MPSAFE
   5464 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5465 		    PCI_INTR_MPSAFE, true);
   5466 #endif
   5467 		memset(intr_xname, 0, sizeof(intr_xname));
   5468 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5469 		    device_xname(sc->sc_dev), qidx);
   5470 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5471 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5472 		if (vih == NULL) {
   5473 			aprint_error_dev(sc->sc_dev,
   5474 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5475 			    intrstr ? " at " : "",
   5476 			    intrstr ? intrstr : "");
   5477 
   5478 			goto fail;
   5479 		}
   5480 		kcpuset_zero(affinity);
   5481 		/* Round-robin affinity */
   5482 		kcpuset_set(affinity, affinity_to);
   5483 		error = interrupt_distribute(vih, affinity, NULL);
   5484 		if (error == 0) {
   5485 			aprint_normal_dev(sc->sc_dev,
   5486 			    "for TX and RX interrupting at %s affinity to %u\n",
   5487 			    intrstr, affinity_to);
   5488 		} else {
   5489 			aprint_normal_dev(sc->sc_dev,
   5490 			    "for TX and RX interrupting at %s\n", intrstr);
   5491 		}
   5492 		sc->sc_ihs[intr_idx] = vih;
   5493 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5494 			goto fail;
   5495 		txrx_established++;
   5496 		intr_idx++;
   5497 	}
   5498 
   5499 	/* LINK */
   5500 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5501 	    sizeof(intrbuf));
   5502 #ifdef WM_MPSAFE
   5503 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5504 #endif
   5505 	memset(intr_xname, 0, sizeof(intr_xname));
   5506 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5507 	    device_xname(sc->sc_dev));
   5508 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5509 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5510 	if (vih == NULL) {
   5511 		aprint_error_dev(sc->sc_dev,
   5512 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5513 		    intrstr ? " at " : "",
   5514 		    intrstr ? intrstr : "");
   5515 
   5516 		goto fail;
   5517 	}
   5518 	/* Keep default affinity to LINK interrupt */
   5519 	aprint_normal_dev(sc->sc_dev,
   5520 	    "for LINK interrupting at %s\n", intrstr);
   5521 	sc->sc_ihs[intr_idx] = vih;
   5522 	sc->sc_link_intr_idx = intr_idx;
   5523 
   5524 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5525 	kcpuset_destroy(affinity);
   5526 	return 0;
   5527 
   5528  fail:
   5529 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5530 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5531 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5532 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5533 	}
   5534 
   5535 	kcpuset_destroy(affinity);
   5536 	return ENOMEM;
   5537 }
   5538 
   5539 static void
   5540 wm_unset_stopping_flags(struct wm_softc *sc)
   5541 {
   5542 	int i;
   5543 
   5544 	KASSERT(WM_CORE_LOCKED(sc));
   5545 
   5546 	/* Must unset stopping flags in ascending order. */
   5547 	for (i = 0; i < sc->sc_nqueues; i++) {
   5548 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5549 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5550 
   5551 		mutex_enter(txq->txq_lock);
   5552 		txq->txq_stopping = false;
   5553 		mutex_exit(txq->txq_lock);
   5554 
   5555 		mutex_enter(rxq->rxq_lock);
   5556 		rxq->rxq_stopping = false;
   5557 		mutex_exit(rxq->rxq_lock);
   5558 	}
   5559 
   5560 	sc->sc_core_stopping = false;
   5561 }
   5562 
   5563 static void
   5564 wm_set_stopping_flags(struct wm_softc *sc)
   5565 {
   5566 	int i;
   5567 
   5568 	KASSERT(WM_CORE_LOCKED(sc));
   5569 
   5570 	sc->sc_core_stopping = true;
   5571 
   5572 	/* Must set stopping flags in ascending order. */
   5573 	for (i = 0; i < sc->sc_nqueues; i++) {
   5574 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5575 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5576 
   5577 		mutex_enter(rxq->rxq_lock);
   5578 		rxq->rxq_stopping = true;
   5579 		mutex_exit(rxq->rxq_lock);
   5580 
   5581 		mutex_enter(txq->txq_lock);
   5582 		txq->txq_stopping = true;
   5583 		mutex_exit(txq->txq_lock);
   5584 	}
   5585 }
   5586 
   5587 /*
   5588  * Write interrupt interval value to ITR or EITR
   5589  */
   5590 static void
   5591 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5592 {
   5593 
   5594 	if (!wmq->wmq_set_itr)
   5595 		return;
   5596 
   5597 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5598 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5599 
   5600 		/*
    5601 		 * The 82575 doesn't have the CNT_INGR field,
    5602 		 * so overwrite the counter field in software.
   5603 		 */
   5604 		if (sc->sc_type == WM_T_82575)
   5605 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5606 		else
   5607 			eitr |= EITR_CNT_INGR;
   5608 
   5609 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5610 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5611 		/*
    5612 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5613 		 * the multiqueue function with MSI-X.
   5614 		 */
   5615 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5616 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5617 	} else {
   5618 		KASSERT(wmq->wmq_id == 0);
   5619 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5620 	}
   5621 
   5622 	wmq->wmq_set_itr = false;
   5623 }
   5624 
   5625 /*
   5626  * TODO
    5627  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5628  * but it does not fit wm(4), so AIM stays disabled until we find an
    5629  * appropriate ITR calculation.
   5630  */
   5631 /*
    5632  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5633  * write; this function itself does not write the ITR/EITR register.
   5634  */
   5635 static void
   5636 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5637 {
   5638 #ifdef NOTYET
   5639 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5640 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5641 	uint32_t avg_size = 0;
   5642 	uint32_t new_itr;
   5643 
   5644 	if (rxq->rxq_packets)
   5645 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5646 	if (txq->txq_packets)
   5647 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5648 
   5649 	if (avg_size == 0) {
   5650 		new_itr = 450; /* restore default value */
   5651 		goto out;
   5652 	}
   5653 
   5654 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5655 	avg_size += 24;
   5656 
   5657 	/* Don't starve jumbo frames */
   5658 	avg_size = uimin(avg_size, 3000);
   5659 
   5660 	/* Give a little boost to mid-size frames */
   5661 	if ((avg_size > 300) && (avg_size < 1200))
   5662 		new_itr = avg_size / 3;
   5663 	else
   5664 		new_itr = avg_size / 2;
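         	/*
         	 * E.g. an average frame of 600 bytes becomes 624 with the
         	 * 24-byte overhead above; being mid-size, it would yield
         	 * 624 / 3 = 208.
         	 */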
   5665 
   5666 out:
   5667 	/*
    5668 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   5669 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5670 	 */
   5671 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5672 		new_itr *= 4;
   5673 
   5674 	if (new_itr != wmq->wmq_itr) {
   5675 		wmq->wmq_itr = new_itr;
   5676 		wmq->wmq_set_itr = true;
   5677 	} else
   5678 		wmq->wmq_set_itr = false;
   5679 
   5680 	rxq->rxq_packets = 0;
   5681 	rxq->rxq_bytes = 0;
   5682 	txq->txq_packets = 0;
   5683 	txq->txq_bytes = 0;
   5684 #endif
   5685 }
   5686 
   5687 /*
   5688  * wm_init:		[ifnet interface function]
   5689  *
   5690  *	Initialize the interface.
   5691  */
   5692 static int
   5693 wm_init(struct ifnet *ifp)
   5694 {
   5695 	struct wm_softc *sc = ifp->if_softc;
   5696 	int ret;
   5697 
   5698 	WM_CORE_LOCK(sc);
   5699 	ret = wm_init_locked(ifp);
   5700 	WM_CORE_UNLOCK(sc);
   5701 
   5702 	return ret;
   5703 }
   5704 
   5705 static int
   5706 wm_init_locked(struct ifnet *ifp)
   5707 {
   5708 	struct wm_softc *sc = ifp->if_softc;
   5709 	struct ethercom *ec = &sc->sc_ethercom;
   5710 	int i, j, trynum, error = 0;
   5711 	uint32_t reg, sfp_mask = 0;
   5712 
   5713 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5714 		device_xname(sc->sc_dev), __func__));
   5715 	KASSERT(WM_CORE_LOCKED(sc));
   5716 
   5717 	/*
    5718 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5719 	 * There is a small but measurable benefit to avoiding the adjustment
   5720 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5721 	 * on such platforms.  One possibility is that the DMA itself is
   5722 	 * slightly more efficient if the front of the entire packet (instead
   5723 	 * of the front of the headers) is aligned.
   5724 	 *
   5725 	 * Note we must always set align_tweak to 0 if we are using
   5726 	 * jumbo frames.
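         	 *
         	 * (The usual rationale for a 2 byte tweak: it offsets the
         	 * 14 byte Ethernet header so that the following IP header
         	 * lands on a 4-byte boundary.)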
   5727 	 */
   5728 #ifdef __NO_STRICT_ALIGNMENT
   5729 	sc->sc_align_tweak = 0;
   5730 #else
   5731 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5732 		sc->sc_align_tweak = 0;
   5733 	else
   5734 		sc->sc_align_tweak = 2;
   5735 #endif /* __NO_STRICT_ALIGNMENT */
   5736 
   5737 	/* Cancel any pending I/O. */
   5738 	wm_stop_locked(ifp, 0);
   5739 
   5740 	/* Update statistics before reset */
   5741 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5742 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5743 
   5744 	/* PCH_SPT hardware workaround */
   5745 	if (sc->sc_type == WM_T_PCH_SPT)
   5746 		wm_flush_desc_rings(sc);
   5747 
   5748 	/* Reset the chip to a known state. */
   5749 	wm_reset(sc);
   5750 
   5751 	/*
   5752 	 * AMT based hardware can now take control from firmware
   5753 	 * Do this after reset.
   5754 	 */
   5755 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5756 		wm_get_hw_control(sc);
   5757 
   5758 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5759 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5760 		wm_legacy_irq_quirk_spt(sc);
   5761 
   5762 	/* Init hardware bits */
   5763 	wm_initialize_hardware_bits(sc);
   5764 
   5765 	/* Reset the PHY. */
   5766 	if (sc->sc_flags & WM_F_HAS_MII)
   5767 		wm_gmii_reset(sc);
   5768 
   5769 	if (sc->sc_type >= WM_T_ICH8) {
   5770 		reg = CSR_READ(sc, WMREG_GCR);
   5771 		/*
    5772 		 * ICH8 No-snoop bits have the opposite polarity. Set to snoop by
   5773 		 * default after reset.
   5774 		 */
   5775 		if (sc->sc_type == WM_T_ICH8)
   5776 			reg |= GCR_NO_SNOOP_ALL;
   5777 		else
   5778 			reg &= ~GCR_NO_SNOOP_ALL;
   5779 		CSR_WRITE(sc, WMREG_GCR, reg);
   5780 	}
   5781 	if ((sc->sc_type >= WM_T_ICH8)
   5782 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5783 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5784 
   5785 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5786 		reg |= CTRL_EXT_RO_DIS;
   5787 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5788 	}
   5789 
   5790 	/* Calculate (E)ITR value */
   5791 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5792 		/*
   5793 		 * For NEWQUEUE's EITR (except for 82575).
    5794 		 * The 82575's EITR should be set to the same throttling
    5795 		 * value as other old controllers' ITR because the
    5796 		 * interrupts/sec calculation is the same, that is,
    5797 		 * 1,000,000,000 / (N * 256).
    5798 		 *
    5799 		 * The 82574's EITR should be set to the same throttling
    5800 		 * value as ITR. For N interrupts/sec, set this value to
    5801 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5802 		 */
   5803 		sc->sc_itr_init = 450;
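         		/*
         		 * By the formula above, 450 corresponds to roughly
         		 * 1,000,000 / 450 ~= 2222 interrupts/sec.
         		 */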
   5804 	} else if (sc->sc_type >= WM_T_82543) {
   5805 		/*
   5806 		 * Set up the interrupt throttling register (units of 256ns)
   5807 		 * Note that a footnote in Intel's documentation says this
   5808 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5809 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5810 		 * that that is also true for the 1024ns units of the other
   5811 		 * interrupt-related timer registers -- so, really, we ought
   5812 		 * to divide this value by 4 when the link speed is low.
   5813 		 *
   5814 		 * XXX implement this division at link speed change!
   5815 		 */
   5816 
   5817 		/*
   5818 		 * For N interrupts/sec, set this value to:
   5819 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5820 		 * absolute and packet timer values to this value
   5821 		 * divided by 4 to get "simple timer" behavior.
   5822 		 */
   5823 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
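         		/* I.e. 1,000,000,000 / (1500 * 256) ~= 2604 ints/sec. */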
   5824 	}
   5825 
   5826 	error = wm_init_txrx_queues(sc);
   5827 	if (error)
   5828 		goto out;
   5829 
   5830 	/* Clear out the VLAN table -- we don't use it (yet). */
   5831 	CSR_WRITE(sc, WMREG_VET, 0);
   5832 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5833 		trynum = 10; /* Due to hw errata */
   5834 	else
   5835 		trynum = 1;
   5836 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5837 		for (j = 0; j < trynum; j++)
   5838 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5839 
   5840 	/*
   5841 	 * Set up flow-control parameters.
   5842 	 *
   5843 	 * XXX Values could probably stand some tuning.
   5844 	 */
   5845 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5846 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5847 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5848 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5849 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5850 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5851 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5852 	}
   5853 
   5854 	sc->sc_fcrtl = FCRTL_DFLT;
   5855 	if (sc->sc_type < WM_T_82543) {
   5856 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5857 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5858 	} else {
   5859 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5860 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5861 	}
   5862 
   5863 	if (sc->sc_type == WM_T_80003)
   5864 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5865 	else
   5866 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5867 
   5868 	/* Writes the control register. */
   5869 	wm_set_vlan(sc);
   5870 
   5871 	if (sc->sc_flags & WM_F_HAS_MII) {
   5872 		uint16_t kmreg;
   5873 
   5874 		switch (sc->sc_type) {
   5875 		case WM_T_80003:
   5876 		case WM_T_ICH8:
   5877 		case WM_T_ICH9:
   5878 		case WM_T_ICH10:
   5879 		case WM_T_PCH:
   5880 		case WM_T_PCH2:
   5881 		case WM_T_PCH_LPT:
   5882 		case WM_T_PCH_SPT:
   5883 		case WM_T_PCH_CNP:
   5884 			/*
   5885 			 * Set the mac to wait the maximum time between each
   5886 			 * iteration and increase the max iterations when
   5887 			 * polling the phy; this fixes erroneous timeouts at
   5888 			 * 10Mbps.
   5889 			 */
   5890 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5891 			    0xFFFF);
   5892 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5893 			    &kmreg);
   5894 			kmreg |= 0x3F;
   5895 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5896 			    kmreg);
   5897 			break;
   5898 		default:
   5899 			break;
   5900 		}
   5901 
   5902 		if (sc->sc_type == WM_T_80003) {
   5903 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5904 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5905 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5906 
   5907 			/* Bypass RX and TX FIFO's */
   5908 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5909 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5910 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5911 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5912 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5913 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5914 		}
   5915 	}
   5916 #if 0
   5917 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5918 #endif
   5919 
   5920 	/* Set up checksum offload parameters. */
   5921 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5922 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5923 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5924 		reg |= RXCSUM_IPOFL;
   5925 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5926 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5927 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5928 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5929 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5930 
   5931 	/* Set registers about MSI-X */
   5932 	if (wm_is_using_msix(sc)) {
   5933 		uint32_t ivar, qintr_idx;
   5934 		struct wm_queue *wmq;
   5935 		unsigned int qid;
   5936 
   5937 		if (sc->sc_type == WM_T_82575) {
   5938 			/* Interrupt control */
   5939 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5940 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5941 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5942 
   5943 			/* TX and RX */
   5944 			for (i = 0; i < sc->sc_nqueues; i++) {
   5945 				wmq = &sc->sc_queue[i];
   5946 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5947 				    EITR_TX_QUEUE(wmq->wmq_id)
   5948 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5949 			}
   5950 			/* Link status */
   5951 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5952 			    EITR_OTHER);
   5953 		} else if (sc->sc_type == WM_T_82574) {
   5954 			/* Interrupt control */
   5955 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5956 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5957 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5958 
   5959 			/*
    5960 			 * Work around an issue with spurious interrupts
    5961 			 * in MSI-X mode.
    5962 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5963 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5964 			 */
   5965 			reg = CSR_READ(sc, WMREG_RFCTL);
   5966 			reg |= WMREG_RFCTL_ACKDIS;
   5967 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5968 
   5969 			ivar = 0;
   5970 			/* TX and RX */
   5971 			for (i = 0; i < sc->sc_nqueues; i++) {
   5972 				wmq = &sc->sc_queue[i];
   5973 				qid = wmq->wmq_id;
   5974 				qintr_idx = wmq->wmq_intr_idx;
   5975 
   5976 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5977 				    IVAR_TX_MASK_Q_82574(qid));
   5978 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5979 				    IVAR_RX_MASK_Q_82574(qid));
   5980 			}
   5981 			/* Link status */
   5982 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5983 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5984 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5985 		} else {
   5986 			/* Interrupt control */
   5987 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5988 			    | GPIE_EIAME | GPIE_PBA);
   5989 
   5990 			switch (sc->sc_type) {
   5991 			case WM_T_82580:
   5992 			case WM_T_I350:
   5993 			case WM_T_I354:
   5994 			case WM_T_I210:
   5995 			case WM_T_I211:
   5996 				/* TX and RX */
   5997 				for (i = 0; i < sc->sc_nqueues; i++) {
   5998 					wmq = &sc->sc_queue[i];
   5999 					qid = wmq->wmq_id;
   6000 					qintr_idx = wmq->wmq_intr_idx;
   6001 
   6002 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6003 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6004 					ivar |= __SHIFTIN((qintr_idx
   6005 						| IVAR_VALID),
   6006 					    IVAR_TX_MASK_Q(qid));
   6007 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6008 					ivar |= __SHIFTIN((qintr_idx
   6009 						| IVAR_VALID),
   6010 					    IVAR_RX_MASK_Q(qid));
   6011 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6012 				}
   6013 				break;
   6014 			case WM_T_82576:
   6015 				/* TX and RX */
   6016 				for (i = 0; i < sc->sc_nqueues; i++) {
   6017 					wmq = &sc->sc_queue[i];
   6018 					qid = wmq->wmq_id;
   6019 					qintr_idx = wmq->wmq_intr_idx;
   6020 
   6021 					ivar = CSR_READ(sc,
   6022 					    WMREG_IVAR_Q_82576(qid));
   6023 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6024 					ivar |= __SHIFTIN((qintr_idx
   6025 						| IVAR_VALID),
   6026 					    IVAR_TX_MASK_Q_82576(qid));
   6027 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6028 					ivar |= __SHIFTIN((qintr_idx
   6029 						| IVAR_VALID),
   6030 					    IVAR_RX_MASK_Q_82576(qid));
   6031 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6032 					    ivar);
   6033 				}
   6034 				break;
   6035 			default:
   6036 				break;
   6037 			}
   6038 
   6039 			/* Link status */
   6040 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6041 			    IVAR_MISC_OTHER);
   6042 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6043 		}
   6044 
   6045 		if (wm_is_using_multiqueue(sc)) {
   6046 			wm_init_rss(sc);
   6047 
   6048 			/*
    6049 			 * NOTE: Receive Full-Packet Checksum Offload
    6050 			 * is mutually exclusive with Multiqueue. However,
    6051 			 * this is not the same as TCP/IP checksums, which
    6052 			 * still work.
    6053 			 */
   6054 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6055 			reg |= RXCSUM_PCSD;
   6056 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6057 		}
   6058 	}
   6059 
   6060 	/* Set up the interrupt registers. */
   6061 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6062 
   6063 	/* Enable SFP module insertion interrupt if it's required */
   6064 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6065 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6066 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6067 		sfp_mask = ICR_GPI(0);
   6068 	}
   6069 
   6070 	if (wm_is_using_msix(sc)) {
   6071 		uint32_t mask;
   6072 		struct wm_queue *wmq;
   6073 
   6074 		switch (sc->sc_type) {
   6075 		case WM_T_82574:
   6076 			mask = 0;
   6077 			for (i = 0; i < sc->sc_nqueues; i++) {
   6078 				wmq = &sc->sc_queue[i];
   6079 				mask |= ICR_TXQ(wmq->wmq_id);
   6080 				mask |= ICR_RXQ(wmq->wmq_id);
   6081 			}
   6082 			mask |= ICR_OTHER;
   6083 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6084 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6085 			break;
   6086 		default:
   6087 			if (sc->sc_type == WM_T_82575) {
   6088 				mask = 0;
   6089 				for (i = 0; i < sc->sc_nqueues; i++) {
   6090 					wmq = &sc->sc_queue[i];
   6091 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6092 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6093 				}
   6094 				mask |= EITR_OTHER;
   6095 			} else {
   6096 				mask = 0;
   6097 				for (i = 0; i < sc->sc_nqueues; i++) {
   6098 					wmq = &sc->sc_queue[i];
   6099 					mask |= 1 << wmq->wmq_intr_idx;
   6100 				}
   6101 				mask |= 1 << sc->sc_link_intr_idx;
   6102 			}
   6103 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6104 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6105 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6106 
   6107 			/* For other interrupts */
   6108 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6109 			break;
   6110 		}
   6111 	} else {
   6112 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6113 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6114 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6115 	}
   6116 
   6117 	/* Set up the inter-packet gap. */
   6118 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6119 
   6120 	if (sc->sc_type >= WM_T_82543) {
   6121 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6122 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6123 			wm_itrs_writereg(sc, wmq);
   6124 		}
   6125 		/*
    6126 		 * Link interrupts occur much less frequently than TX
    6127 		 * and RX interrupts, so we don't tune the
    6128 		 * EITR(WM_MSIX_LINKINTR_IDX) value like FreeBSD's
    6129 		 * if_igb does.
   6130 		 */
   6131 	}
   6132 
   6133 	/* Set the VLAN ethernetype. */
   6134 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6135 
   6136 	/*
   6137 	 * Set up the transmit control register; we start out with
    6138 	 * a collision distance suitable for FDX, but update it when
   6139 	 * we resolve the media type.
   6140 	 */
   6141 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6142 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6143 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6144 	if (sc->sc_type >= WM_T_82571)
   6145 		sc->sc_tctl |= TCTL_MULR;
   6146 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6147 
   6148 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6149 		/* Write TDT after TCTL.EN is set. See the document. */
   6150 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6151 	}
   6152 
   6153 	if (sc->sc_type == WM_T_80003) {
   6154 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6155 		reg &= ~TCTL_EXT_GCEX_MASK;
   6156 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6157 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6158 	}
   6159 
   6160 	/* Set the media. */
   6161 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6162 		goto out;
   6163 
   6164 	/* Configure for OS presence */
   6165 	wm_init_manageability(sc);
   6166 
   6167 	/*
   6168 	 * Set up the receive control register; we actually program the
   6169 	 * register when we set the receive filter. Use multicast address
   6170 	 * offset type 0.
   6171 	 *
   6172 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6173 	 * don't enable that feature.
   6174 	 */
   6175 	sc->sc_mchash_type = 0;
   6176 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6177 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6178 
   6179 	/* 82574 use one buffer extended Rx descriptor. */
   6180 	if (sc->sc_type == WM_T_82574)
   6181 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6182 
   6183 	/*
   6184 	 * The I350 has a bug where it always strips the CRC whether
    6185 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   6186 	 */
   6187 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6188 	    || (sc->sc_type == WM_T_I210))
   6189 		sc->sc_rctl |= RCTL_SECRC;
   6190 
   6191 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6192 	    && (ifp->if_mtu > ETHERMTU)) {
   6193 		sc->sc_rctl |= RCTL_LPE;
   6194 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6195 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6196 	}
   6197 
   6198 	if (MCLBYTES == 2048)
   6199 		sc->sc_rctl |= RCTL_2k;
   6200 	else {
   6201 		if (sc->sc_type >= WM_T_82543) {
   6202 			switch (MCLBYTES) {
   6203 			case 4096:
   6204 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6205 				break;
   6206 			case 8192:
   6207 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6208 				break;
   6209 			case 16384:
   6210 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6211 				break;
   6212 			default:
   6213 				panic("wm_init: MCLBYTES %d unsupported",
   6214 				    MCLBYTES);
   6215 				break;
   6216 			}
   6217 		} else
   6218 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6219 	}
   6220 
   6221 	/* Enable ECC */
   6222 	switch (sc->sc_type) {
   6223 	case WM_T_82571:
   6224 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6225 		reg |= PBA_ECC_CORR_EN;
   6226 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6227 		break;
   6228 	case WM_T_PCH_LPT:
   6229 	case WM_T_PCH_SPT:
   6230 	case WM_T_PCH_CNP:
   6231 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6232 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6233 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6234 
   6235 		sc->sc_ctrl |= CTRL_MEHE;
   6236 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6237 		break;
   6238 	default:
   6239 		break;
   6240 	}
   6241 
   6242 	/*
   6243 	 * Set the receive filter.
   6244 	 *
   6245 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6246 	 * the setting of RCTL.EN in wm_set_filter()
   6247 	 */
   6248 	wm_set_filter(sc);
   6249 
    6250 	/* On 82575 and later, set RDT only if RX is enabled. */
   6251 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6252 		int qidx;
   6253 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6254 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6255 			for (i = 0; i < WM_NRXDESC; i++) {
   6256 				mutex_enter(rxq->rxq_lock);
   6257 				wm_init_rxdesc(rxq, i);
   6258 				mutex_exit(rxq->rxq_lock);
   6259 
   6260 			}
   6261 		}
   6262 	}
   6263 
   6264 	wm_unset_stopping_flags(sc);
   6265 
   6266 	/* Start the one second link check clock. */
   6267 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6268 
   6269 	/* ...all done! */
   6270 	ifp->if_flags |= IFF_RUNNING;
   6271 	ifp->if_flags &= ~IFF_OACTIVE;
   6272 
   6273  out:
   6274 	/* Save last flags for the callback */
   6275 	sc->sc_if_flags = ifp->if_flags;
   6276 	sc->sc_ec_capenable = ec->ec_capenable;
   6277 	if (error)
   6278 		log(LOG_ERR, "%s: interface not running\n",
   6279 		    device_xname(sc->sc_dev));
   6280 	return error;
   6281 }
   6282 
   6283 /*
   6284  * wm_stop:		[ifnet interface function]
   6285  *
   6286  *	Stop transmission on the interface.
   6287  */
   6288 static void
   6289 wm_stop(struct ifnet *ifp, int disable)
   6290 {
   6291 	struct wm_softc *sc = ifp->if_softc;
   6292 
   6293 	WM_CORE_LOCK(sc);
   6294 	wm_stop_locked(ifp, disable);
   6295 	WM_CORE_UNLOCK(sc);
   6296 }
   6297 
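         /*
          * wm_stop_locked:
          *
          *	Stop transmission on the interface.  The caller must hold
          *	the core lock.
          */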
   6298 static void
   6299 wm_stop_locked(struct ifnet *ifp, int disable)
   6300 {
   6301 	struct wm_softc *sc = ifp->if_softc;
   6302 	struct wm_txsoft *txs;
   6303 	int i, qidx;
   6304 
   6305 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6306 		device_xname(sc->sc_dev), __func__));
   6307 	KASSERT(WM_CORE_LOCKED(sc));
   6308 
   6309 	wm_set_stopping_flags(sc);
   6310 
   6311 	/* Stop the one second clock. */
   6312 	callout_stop(&sc->sc_tick_ch);
   6313 
   6314 	/* Stop the 82547 Tx FIFO stall check timer. */
   6315 	if (sc->sc_type == WM_T_82547)
   6316 		callout_stop(&sc->sc_txfifo_ch);
   6317 
   6318 	if (sc->sc_flags & WM_F_HAS_MII) {
   6319 		/* Down the MII. */
   6320 		mii_down(&sc->sc_mii);
   6321 	} else {
   6322 #if 0
   6323 		/* Should we clear PHY's status properly? */
   6324 		wm_reset(sc);
   6325 #endif
   6326 	}
   6327 
   6328 	/* Stop the transmit and receive processes. */
   6329 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6330 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6331 	sc->sc_rctl &= ~RCTL_EN;
   6332 
   6333 	/*
   6334 	 * Clear the interrupt mask to ensure the device cannot assert its
   6335 	 * interrupt line.
   6336 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6337 	 * service any currently pending or shared interrupt.
   6338 	 */
   6339 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6340 	sc->sc_icr = 0;
   6341 	if (wm_is_using_msix(sc)) {
   6342 		if (sc->sc_type != WM_T_82574) {
   6343 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6344 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6345 		} else
   6346 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6347 	}
   6348 
   6349 	/* Release any queued transmit buffers. */
   6350 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6351 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6352 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6353 		mutex_enter(txq->txq_lock);
   6354 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6355 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6356 			txs = &txq->txq_soft[i];
   6357 			if (txs->txs_mbuf != NULL) {
    6358 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6359 				m_freem(txs->txs_mbuf);
   6360 				txs->txs_mbuf = NULL;
   6361 			}
   6362 		}
   6363 		mutex_exit(txq->txq_lock);
   6364 	}
   6365 
   6366 	/* Mark the interface as down and cancel the watchdog timer. */
   6367 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6368 
   6369 	if (disable) {
   6370 		for (i = 0; i < sc->sc_nqueues; i++) {
   6371 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6372 			mutex_enter(rxq->rxq_lock);
   6373 			wm_rxdrain(rxq);
   6374 			mutex_exit(rxq->rxq_lock);
   6375 		}
   6376 	}
   6377 
   6378 #if 0 /* notyet */
   6379 	if (sc->sc_type >= WM_T_82544)
   6380 		CSR_WRITE(sc, WMREG_WUC, 0);
   6381 #endif
   6382 }
   6383 
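         /*
          * wm_dump_mbuf_chain:
          *
          *	Log each mbuf in a chain (data pointer, length and flags),
          *	followed by the total number of mbufs in the chain.
          */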
   6384 static void
   6385 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6386 {
   6387 	struct mbuf *m;
   6388 	int i;
   6389 
   6390 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6391 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6392 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6393 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6394 		    m->m_data, m->m_len, m->m_flags);
   6395 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6396 	    i, i == 1 ? "" : "s");
   6397 }
   6398 
   6399 /*
   6400  * wm_82547_txfifo_stall:
   6401  *
   6402  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6403  *	reset the FIFO pointers, and restart packet transmission.
   6404  */
   6405 static void
   6406 wm_82547_txfifo_stall(void *arg)
   6407 {
   6408 	struct wm_softc *sc = arg;
   6409 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6410 
   6411 	mutex_enter(txq->txq_lock);
   6412 
   6413 	if (txq->txq_stopping)
   6414 		goto out;
   6415 
   6416 	if (txq->txq_fifo_stall) {
   6417 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6418 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6419 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6420 			/*
   6421 			 * Packets have drained.  Stop transmitter, reset
   6422 			 * FIFO pointers, restart transmitter, and kick
   6423 			 * the packet queue.
   6424 			 */
   6425 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6426 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6427 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6428 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6429 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6430 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6431 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6432 			CSR_WRITE_FLUSH(sc);
   6433 
   6434 			txq->txq_fifo_head = 0;
   6435 			txq->txq_fifo_stall = 0;
   6436 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6437 		} else {
   6438 			/*
   6439 			 * Still waiting for packets to drain; try again in
   6440 			 * another tick.
   6441 			 */
   6442 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6443 		}
   6444 	}
   6445 
   6446 out:
   6447 	mutex_exit(txq->txq_lock);
   6448 }
   6449 
   6450 /*
   6451  * wm_82547_txfifo_bugchk:
   6452  *
   6453  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6454  *	prevent enqueueing a packet that would wrap around the end
    6455  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6456  *
   6457  *	We do this by checking the amount of space before the end
   6458  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6459  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6460  *	the internal FIFO pointers to the beginning, and restart
   6461  *	transmission on the interface.
   6462  */
   6463 #define	WM_FIFO_HDR		0x10
   6464 #define	WM_82547_PAD_LEN	0x3e0
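         /*
          * In the FIFO space accounting below, each packet is charged its
          * own length plus one WM_FIFO_HDR, rounded up to a WM_FIFO_HDR
          * boundary.
          */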
   6465 static int
   6466 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6467 {
   6468 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6469 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6470 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6471 
   6472 	/* Just return if already stalled. */
   6473 	if (txq->txq_fifo_stall)
   6474 		return 1;
   6475 
   6476 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6477 		/* Stall only occurs in half-duplex mode. */
   6478 		goto send_packet;
   6479 	}
   6480 
   6481 	if (len >= WM_82547_PAD_LEN + space) {
   6482 		txq->txq_fifo_stall = 1;
   6483 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6484 		return 1;
   6485 	}
   6486 
   6487  send_packet:
   6488 	txq->txq_fifo_head += len;
   6489 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6490 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6491 
   6492 	return 0;
   6493 }
   6494 
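         /*
          * wm_alloc_tx_descs:
          *
          *	Allocate, map and load the DMA memory for the Tx descriptor
          *	ring of one queue.
          */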
   6495 static int
   6496 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6497 {
   6498 	int error;
   6499 
   6500 	/*
   6501 	 * Allocate the control data structures, and create and load the
   6502 	 * DMA map for it.
   6503 	 *
   6504 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6505 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6506 	 * both sets within the same 4G segment.
   6507 	 */
   6508 	if (sc->sc_type < WM_T_82544)
   6509 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6510 	else
   6511 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6512 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6513 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6514 	else
   6515 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6516 
   6517 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6518 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6519 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6520 		aprint_error_dev(sc->sc_dev,
   6521 		    "unable to allocate TX control data, error = %d\n",
   6522 		    error);
   6523 		goto fail_0;
   6524 	}
   6525 
   6526 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6527 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6528 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6529 		aprint_error_dev(sc->sc_dev,
   6530 		    "unable to map TX control data, error = %d\n", error);
   6531 		goto fail_1;
   6532 	}
   6533 
   6534 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6535 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6536 		aprint_error_dev(sc->sc_dev,
   6537 		    "unable to create TX control data DMA map, error = %d\n",
   6538 		    error);
   6539 		goto fail_2;
   6540 	}
   6541 
   6542 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6543 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6544 		aprint_error_dev(sc->sc_dev,
   6545 		    "unable to load TX control data DMA map, error = %d\n",
   6546 		    error);
   6547 		goto fail_3;
   6548 	}
   6549 
   6550 	return 0;
   6551 
   6552  fail_3:
   6553 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6554  fail_2:
   6555 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6556 	    WM_TXDESCS_SIZE(txq));
   6557  fail_1:
   6558 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6559  fail_0:
   6560 	return error;
   6561 }
   6562 
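         /*
          * wm_free_tx_descs:
          *
          *	Undo wm_alloc_tx_descs(): unload, destroy, unmap and free
          *	the Tx descriptor ring DMA resources.
          */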
   6563 static void
   6564 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6565 {
   6566 
   6567 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6568 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6569 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6570 	    WM_TXDESCS_SIZE(txq));
   6571 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6572 }
   6573 
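         /*
          * wm_alloc_rx_descs:
          *
          *	Allocate, map and load the DMA memory for the Rx descriptor
          *	ring of one queue.  The descriptor size depends on the chip
          *	type.
          */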
   6574 static int
   6575 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6576 {
   6577 	int error;
   6578 	size_t rxq_descs_size;
   6579 
   6580 	/*
   6581 	 * Allocate the control data structures, and create and load the
   6582 	 * DMA map for it.
   6583 	 *
   6584 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6585 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6586 	 * both sets within the same 4G segment.
   6587 	 */
   6588 	rxq->rxq_ndesc = WM_NRXDESC;
   6589 	if (sc->sc_type == WM_T_82574)
   6590 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6591 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6592 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6593 	else
   6594 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6595 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6596 
   6597 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6598 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6599 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6600 		aprint_error_dev(sc->sc_dev,
   6601 		    "unable to allocate RX control data, error = %d\n",
   6602 		    error);
   6603 		goto fail_0;
   6604 	}
   6605 
   6606 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6607 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6608 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6609 		aprint_error_dev(sc->sc_dev,
   6610 		    "unable to map RX control data, error = %d\n", error);
   6611 		goto fail_1;
   6612 	}
   6613 
   6614 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6615 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6616 		aprint_error_dev(sc->sc_dev,
   6617 		    "unable to create RX control data DMA map, error = %d\n",
   6618 		    error);
   6619 		goto fail_2;
   6620 	}
   6621 
   6622 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6623 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6624 		aprint_error_dev(sc->sc_dev,
   6625 		    "unable to load RX control data DMA map, error = %d\n",
   6626 		    error);
   6627 		goto fail_3;
   6628 	}
   6629 
   6630 	return 0;
   6631 
   6632  fail_3:
   6633 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6634  fail_2:
   6635 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6636 	    rxq_descs_size);
   6637  fail_1:
   6638 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6639  fail_0:
   6640 	return error;
   6641 }
   6642 
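         /*
          * wm_free_rx_descs:
          *
          *	Undo wm_alloc_rx_descs(): unload, destroy, unmap and free
          *	the Rx descriptor ring DMA resources.
          */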
   6643 static void
   6644 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6645 {
   6646 
   6647 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6648 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6649 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6650 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6651 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6652 }
   6653 
   6654 
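         /*
          * wm_alloc_tx_buffer:
          *
          *	Create the DMA maps for the Tx job descriptors of one queue.
          */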
   6655 static int
   6656 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6657 {
   6658 	int i, error;
   6659 
   6660 	/* Create the transmit buffer DMA maps. */
   6661 	WM_TXQUEUELEN(txq) =
   6662 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6663 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6664 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6665 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6666 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6667 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6668 			aprint_error_dev(sc->sc_dev,
   6669 			    "unable to create Tx DMA map %d, error = %d\n",
   6670 			    i, error);
   6671 			goto fail;
   6672 		}
   6673 	}
   6674 
   6675 	return 0;
   6676 
   6677  fail:
   6678 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6679 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6680 			bus_dmamap_destroy(sc->sc_dmat,
   6681 			    txq->txq_soft[i].txs_dmamap);
   6682 	}
   6683 	return error;
   6684 }
   6685 
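         /*
          * wm_free_tx_buffer:
          *
          *	Destroy the DMA maps created by wm_alloc_tx_buffer().
          */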
   6686 static void
   6687 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6688 {
   6689 	int i;
   6690 
   6691 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6692 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6693 			bus_dmamap_destroy(sc->sc_dmat,
   6694 			    txq->txq_soft[i].txs_dmamap);
   6695 	}
   6696 }
   6697 
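         /*
          * wm_alloc_rx_buffer:
          *
          *	Create the per-descriptor Rx buffer DMA maps of one queue.
          */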
   6698 static int
   6699 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6700 {
   6701 	int i, error;
   6702 
   6703 	/* Create the receive buffer DMA maps. */
   6704 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6705 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6706 			    MCLBYTES, 0, 0,
   6707 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6708 			aprint_error_dev(sc->sc_dev,
   6709 			    "unable to create Rx DMA map %d error = %d\n",
   6710 			    i, error);
   6711 			goto fail;
   6712 		}
   6713 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6714 	}
   6715 
   6716 	return 0;
   6717 
   6718  fail:
   6719 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6720 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6721 			bus_dmamap_destroy(sc->sc_dmat,
   6722 			    rxq->rxq_soft[i].rxs_dmamap);
   6723 	}
   6724 	return error;
   6725 }
   6726 
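         /*
          * wm_free_rx_buffer:
          *
          *	Destroy the DMA maps created by wm_alloc_rx_buffer().
          */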
   6727 static void
   6728 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6729 {
   6730 	int i;
   6731 
   6732 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6733 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6734 			bus_dmamap_destroy(sc->sc_dmat,
   6735 			    rxq->rxq_soft[i].rxs_dmamap);
   6736 	}
   6737 }
   6738 
   6739 /*
    6740  * wm_alloc_txrx_queues:
   6741  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6742  */
   6743 static int
   6744 wm_alloc_txrx_queues(struct wm_softc *sc)
   6745 {
   6746 	int i, error, tx_done, rx_done;
   6747 
   6748 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6749 	    KM_SLEEP);
   6750 	if (sc->sc_queue == NULL) {
    6751 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6752 		error = ENOMEM;
   6753 		goto fail_0;
   6754 	}
   6755 
   6756 	/* For transmission */
   6757 	error = 0;
   6758 	tx_done = 0;
   6759 	for (i = 0; i < sc->sc_nqueues; i++) {
   6760 #ifdef WM_EVENT_COUNTERS
   6761 		int j;
   6762 		const char *xname;
   6763 #endif
   6764 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6765 		txq->txq_sc = sc;
   6766 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6767 
   6768 		error = wm_alloc_tx_descs(sc, txq);
   6769 		if (error)
   6770 			break;
   6771 		error = wm_alloc_tx_buffer(sc, txq);
   6772 		if (error) {
   6773 			wm_free_tx_descs(sc, txq);
   6774 			break;
   6775 		}
   6776 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6777 		if (txq->txq_interq == NULL) {
   6778 			wm_free_tx_descs(sc, txq);
   6779 			wm_free_tx_buffer(sc, txq);
   6780 			error = ENOMEM;
   6781 			break;
   6782 		}
   6783 
   6784 #ifdef WM_EVENT_COUNTERS
   6785 		xname = device_xname(sc->sc_dev);
   6786 
   6787 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6788 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6789 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6790 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6791 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6792 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6793 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6794 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6795 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6796 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6797 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6798 
   6799 		for (j = 0; j < WM_NTXSEGS; j++) {
   6800 			snprintf(txq->txq_txseg_evcnt_names[j],
   6801 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6802 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6803 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6804 		}
   6805 
   6806 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6807 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6808 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6809 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6810 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6811 #endif /* WM_EVENT_COUNTERS */
   6812 
   6813 		tx_done++;
   6814 	}
   6815 	if (error)
   6816 		goto fail_1;
   6817 
   6818 	/* For receive */
   6819 	error = 0;
   6820 	rx_done = 0;
   6821 	for (i = 0; i < sc->sc_nqueues; i++) {
   6822 #ifdef WM_EVENT_COUNTERS
   6823 		const char *xname;
   6824 #endif
   6825 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6826 		rxq->rxq_sc = sc;
   6827 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6828 
   6829 		error = wm_alloc_rx_descs(sc, rxq);
   6830 		if (error)
   6831 			break;
   6832 
   6833 		error = wm_alloc_rx_buffer(sc, rxq);
   6834 		if (error) {
   6835 			wm_free_rx_descs(sc, rxq);
   6836 			break;
   6837 		}
   6838 
   6839 #ifdef WM_EVENT_COUNTERS
   6840 		xname = device_xname(sc->sc_dev);
   6841 
   6842 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6843 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6844 
   6845 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6846 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6847 #endif /* WM_EVENT_COUNTERS */
   6848 
   6849 		rx_done++;
   6850 	}
   6851 	if (error)
   6852 		goto fail_2;
   6853 
   6854 	for (i = 0; i < sc->sc_nqueues; i++) {
   6855 		char rndname[16];
   6856 
   6857 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
   6858 		    device_xname(sc->sc_dev), i);
   6859 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
   6860 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
   6861 	}
   6862 
   6863 	return 0;
   6864 
   6865  fail_2:
   6866 	for (i = 0; i < rx_done; i++) {
   6867 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6868 		wm_free_rx_buffer(sc, rxq);
   6869 		wm_free_rx_descs(sc, rxq);
   6870 		if (rxq->rxq_lock)
   6871 			mutex_obj_free(rxq->rxq_lock);
   6872 	}
   6873  fail_1:
   6874 	for (i = 0; i < tx_done; i++) {
   6875 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6876 		pcq_destroy(txq->txq_interq);
   6877 		wm_free_tx_buffer(sc, txq);
   6878 		wm_free_tx_descs(sc, txq);
   6879 		if (txq->txq_lock)
   6880 			mutex_obj_free(txq->txq_lock);
   6881 	}
   6882 
   6883 	kmem_free(sc->sc_queue,
   6884 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6885  fail_0:
   6886 	return error;
   6887 }
   6888 
   6889 /*
    6890  * wm_free_txrx_queues:
   6891  *	Free {tx,rx}descs and {tx,rx} buffers
   6892  */
   6893 static void
   6894 wm_free_txrx_queues(struct wm_softc *sc)
   6895 {
   6896 	int i;
   6897 
   6898 	for (i = 0; i < sc->sc_nqueues; i++)
   6899 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
   6900 
   6901 	for (i = 0; i < sc->sc_nqueues; i++) {
   6902 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6903 
   6904 #ifdef WM_EVENT_COUNTERS
   6905 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6906 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6907 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6908 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6909 #endif /* WM_EVENT_COUNTERS */
   6910 
   6911 		wm_free_rx_buffer(sc, rxq);
   6912 		wm_free_rx_descs(sc, rxq);
   6913 		if (rxq->rxq_lock)
   6914 			mutex_obj_free(rxq->rxq_lock);
   6915 	}
   6916 
   6917 	for (i = 0; i < sc->sc_nqueues; i++) {
   6918 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6919 		struct mbuf *m;
   6920 #ifdef WM_EVENT_COUNTERS
   6921 		int j;
   6922 
   6923 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6924 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6925 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6926 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6927 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6928 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6929 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6930 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6931 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6932 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6933 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6934 
   6935 		for (j = 0; j < WM_NTXSEGS; j++)
   6936 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6937 
   6938 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6939 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6940 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6941 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6942 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6943 #endif /* WM_EVENT_COUNTERS */
   6944 
   6945 		/* Drain txq_interq */
   6946 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6947 			m_freem(m);
   6948 		pcq_destroy(txq->txq_interq);
   6949 
   6950 		wm_free_tx_buffer(sc, txq);
   6951 		wm_free_tx_descs(sc, txq);
   6952 		if (txq->txq_lock)
   6953 			mutex_obj_free(txq->txq_lock);
   6954 	}
   6955 
   6956 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6957 }
   6958 
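         /*
          * wm_init_tx_descs:
          *
          *	Clear the Tx descriptor ring of one queue and reset the
          *	ring pointers.
          */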
   6959 static void
   6960 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6961 {
   6962 
   6963 	KASSERT(mutex_owned(txq->txq_lock));
   6964 
   6965 	/* Initialize the transmit descriptor ring. */
   6966 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6967 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6968 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6969 	txq->txq_free = WM_NTXDESC(txq);
   6970 	txq->txq_next = 0;
   6971 }
   6972 
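         /*
          * wm_init_tx_regs:
          *
          *	Program the chip's Tx descriptor base, length, head/tail
          *	and interrupt delay registers for one queue.
          */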
   6973 static void
   6974 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6975     struct wm_txqueue *txq)
   6976 {
   6977 
   6978 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6979 		device_xname(sc->sc_dev), __func__));
   6980 	KASSERT(mutex_owned(txq->txq_lock));
   6981 
   6982 	if (sc->sc_type < WM_T_82543) {
   6983 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6984 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6985 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6986 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6987 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6988 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6989 	} else {
   6990 		int qid = wmq->wmq_id;
   6991 
   6992 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6993 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6994 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6995 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6996 
   6997 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6998 			/*
   6999 			 * Don't write TDT before TCTL.EN is set.
   7000 			 * See the document.
   7001 			 */
   7002 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7003 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7004 			    | TXDCTL_WTHRESH(0));
   7005 		else {
   7006 			/* XXX should update with AIM? */
   7007 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7008 			if (sc->sc_type >= WM_T_82540) {
   7009 				/* Should be the same */
   7010 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7011 			}
   7012 
   7013 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7014 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7015 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7016 		}
   7017 	}
   7018 }
   7019 
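         /*
          * wm_init_tx_buffer:
          *
          *	Reset the software state of the Tx job descriptors of one
          *	queue.
          */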
   7020 static void
   7021 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7022 {
   7023 	int i;
   7024 
   7025 	KASSERT(mutex_owned(txq->txq_lock));
   7026 
   7027 	/* Initialize the transmit job descriptors. */
   7028 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7029 		txq->txq_soft[i].txs_mbuf = NULL;
   7030 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7031 	txq->txq_snext = 0;
   7032 	txq->txq_sdirty = 0;
   7033 }
   7034 
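         /*
          * wm_init_tx_queue:
          *
          *	Initialize one Tx queue: descriptor ring, chip registers
          *	and software state.
          */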
   7035 static void
   7036 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7037     struct wm_txqueue *txq)
   7038 {
   7039 
   7040 	KASSERT(mutex_owned(txq->txq_lock));
   7041 
   7042 	/*
   7043 	 * Set up some register offsets that are different between
   7044 	 * the i82542 and the i82543 and later chips.
   7045 	 */
   7046 	if (sc->sc_type < WM_T_82543)
   7047 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7048 	else
   7049 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7050 
   7051 	wm_init_tx_descs(sc, txq);
   7052 	wm_init_tx_regs(sc, wmq, txq);
   7053 	wm_init_tx_buffer(sc, txq);
   7054 
   7055 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7056 	txq->txq_sending = false;
   7057 }
   7058 
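         /*
          * wm_init_rx_regs:
          *
          *	Program the chip's Rx descriptor base, length, head/tail
          *	and related registers for one queue.
          */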
   7059 static void
   7060 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7061     struct wm_rxqueue *rxq)
   7062 {
   7063 
   7064 	KASSERT(mutex_owned(rxq->rxq_lock));
   7065 
   7066 	/*
   7067 	 * Initialize the receive descriptor and receive job
   7068 	 * descriptor rings.
   7069 	 */
   7070 	if (sc->sc_type < WM_T_82543) {
   7071 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7072 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7073 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7074 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7075 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7076 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7077 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7078 
   7079 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7080 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7081 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7082 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7083 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7084 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7085 	} else {
   7086 		int qid = wmq->wmq_id;
   7087 
   7088 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7089 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7090 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7091 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7092 
   7093 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7094 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7095 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7096 
    7097 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7098 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7099 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7100 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7101 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7102 			    | RXDCTL_WTHRESH(1));
   7103 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7104 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7105 		} else {
   7106 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7107 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7108 			/* XXX should update with AIM? */
   7109 			CSR_WRITE(sc, WMREG_RDTR,
   7110 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7111 			/* MUST be same */
   7112 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7113 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7114 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7115 		}
   7116 	}
   7117 }
   7118 
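         /*
          * wm_init_rx_buffer:
          *
          *	Populate the Rx ring of one queue with mbufs, initializing
          *	the Rx descriptors where the chip type allows it at this
          *	point.
          */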
   7119 static int
   7120 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7121 {
   7122 	struct wm_rxsoft *rxs;
   7123 	int error, i;
   7124 
   7125 	KASSERT(mutex_owned(rxq->rxq_lock));
   7126 
   7127 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7128 		rxs = &rxq->rxq_soft[i];
   7129 		if (rxs->rxs_mbuf == NULL) {
   7130 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7131 				log(LOG_ERR, "%s: unable to allocate or map "
   7132 				    "rx buffer %d, error = %d\n",
   7133 				    device_xname(sc->sc_dev), i, error);
   7134 				/*
   7135 				 * XXX Should attempt to run with fewer receive
   7136 				 * XXX buffers instead of just failing.
   7137 				 */
   7138 				wm_rxdrain(rxq);
   7139 				return ENOMEM;
   7140 			}
   7141 		} else {
   7142 			/*
   7143 			 * For 82575 and 82576, the RX descriptors must be
   7144 			 * initialized after the setting of RCTL.EN in
   7145 			 * wm_set_filter()
   7146 			 */
   7147 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7148 				wm_init_rxdesc(rxq, i);
   7149 		}
   7150 	}
   7151 	rxq->rxq_ptr = 0;
   7152 	rxq->rxq_discard = 0;
   7153 	WM_RXCHAIN_RESET(rxq);
   7154 
   7155 	return 0;
   7156 }
   7157 
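         /*
          * wm_init_rx_queue:
          *
          *	Initialize one Rx queue: register offsets, chip registers
          *	and Rx buffers.
          */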
   7158 static int
   7159 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7160     struct wm_rxqueue *rxq)
   7161 {
   7162 
   7163 	KASSERT(mutex_owned(rxq->rxq_lock));
   7164 
   7165 	/*
   7166 	 * Set up some register offsets that are different between
   7167 	 * the i82542 and the i82543 and later chips.
   7168 	 */
   7169 	if (sc->sc_type < WM_T_82543)
   7170 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7171 	else
   7172 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7173 
   7174 	wm_init_rx_regs(sc, wmq, rxq);
   7175 	return wm_init_rx_buffer(sc, rxq);
   7176 }
   7177 
   7178 /*
    7179  * wm_init_txrx_queues:
   7180  *	Initialize {tx,rx}descs and {tx,rx} buffers
   7181  */
   7182 static int
   7183 wm_init_txrx_queues(struct wm_softc *sc)
   7184 {
   7185 	int i, error = 0;
   7186 
   7187 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7188 		device_xname(sc->sc_dev), __func__));
   7189 
   7190 	for (i = 0; i < sc->sc_nqueues; i++) {
   7191 		struct wm_queue *wmq = &sc->sc_queue[i];
   7192 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7193 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7194 
   7195 		/*
   7196 		 * TODO
    7197 		 * Currently, a constant value is used instead of AIM.
    7198 		 * Furthermore, the interrupt interval of a multiqueue setup
    7199 		 * that uses polling mode is less than the default value.
   7200 		 * More tuning and AIM are required.
   7201 		 */
   7202 		if (wm_is_using_multiqueue(sc))
   7203 			wmq->wmq_itr = 50;
   7204 		else
   7205 			wmq->wmq_itr = sc->sc_itr_init;
   7206 		wmq->wmq_set_itr = true;
   7207 
   7208 		mutex_enter(txq->txq_lock);
   7209 		wm_init_tx_queue(sc, wmq, txq);
   7210 		mutex_exit(txq->txq_lock);
   7211 
   7212 		mutex_enter(rxq->rxq_lock);
   7213 		error = wm_init_rx_queue(sc, wmq, rxq);
   7214 		mutex_exit(rxq->rxq_lock);
   7215 		if (error)
   7216 			break;
   7217 	}
   7218 
   7219 	return error;
   7220 }
   7221 
   7222 /*
   7223  * wm_tx_offload:
   7224  *
   7225  *	Set up TCP/IP checksumming parameters for the
   7226  *	specified packet.
   7227  */
   7228 static int
   7229 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7230     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7231 {
   7232 	struct mbuf *m0 = txs->txs_mbuf;
   7233 	struct livengood_tcpip_ctxdesc *t;
   7234 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7235 	uint32_t ipcse;
   7236 	struct ether_header *eh;
   7237 	int offset, iphl;
   7238 	uint8_t fields;
   7239 
   7240 	/*
   7241 	 * XXX It would be nice if the mbuf pkthdr had offset
   7242 	 * fields for the protocol headers.
   7243 	 */
   7244 
   7245 	eh = mtod(m0, struct ether_header *);
   7246 	switch (htons(eh->ether_type)) {
   7247 	case ETHERTYPE_IP:
   7248 	case ETHERTYPE_IPV6:
   7249 		offset = ETHER_HDR_LEN;
   7250 		break;
   7251 
   7252 	case ETHERTYPE_VLAN:
   7253 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7254 		break;
   7255 
   7256 	default:
   7257 		/* Don't support this protocol or encapsulation. */
   7258 		*fieldsp = 0;
   7259 		*cmdp = 0;
   7260 		return 0;
   7261 	}
   7262 
   7263 	if ((m0->m_pkthdr.csum_flags &
   7264 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7265 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7266 	} else
   7267 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7268 
   7269 	ipcse = offset + iphl - 1;
   7270 
   7271 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7272 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7273 	seg = 0;
   7274 	fields = 0;
   7275 
   7276 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7277 		int hlen = offset + iphl;
   7278 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7279 
   7280 		if (__predict_false(m0->m_len <
   7281 				    (hlen + sizeof(struct tcphdr)))) {
   7282 			/*
   7283 			 * TCP/IP headers are not in the first mbuf; we need
   7284 			 * to do this the slow and painful way. Let's just
   7285 			 * hope this doesn't happen very often.
   7286 			 */
   7287 			struct tcphdr th;
   7288 
   7289 			WM_Q_EVCNT_INCR(txq, tsopain);
   7290 
   7291 			m_copydata(m0, hlen, sizeof(th), &th);
   7292 			if (v4) {
   7293 				struct ip ip;
   7294 
   7295 				m_copydata(m0, offset, sizeof(ip), &ip);
   7296 				ip.ip_len = 0;
   7297 				m_copyback(m0,
   7298 				    offset + offsetof(struct ip, ip_len),
   7299 				    sizeof(ip.ip_len), &ip.ip_len);
   7300 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7301 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7302 			} else {
   7303 				struct ip6_hdr ip6;
   7304 
   7305 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7306 				ip6.ip6_plen = 0;
   7307 				m_copyback(m0,
   7308 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7309 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7310 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7311 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7312 			}
   7313 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7314 			    sizeof(th.th_sum), &th.th_sum);
   7315 
   7316 			hlen += th.th_off << 2;
   7317 		} else {
   7318 			/*
   7319 			 * TCP/IP headers are in the first mbuf; we can do
   7320 			 * this the easy way.
   7321 			 */
   7322 			struct tcphdr *th;
   7323 
   7324 			if (v4) {
   7325 				struct ip *ip =
   7326 				    (void *)(mtod(m0, char *) + offset);
   7327 				th = (void *)(mtod(m0, char *) + hlen);
   7328 
   7329 				ip->ip_len = 0;
   7330 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7331 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7332 			} else {
   7333 				struct ip6_hdr *ip6 =
   7334 				    (void *)(mtod(m0, char *) + offset);
   7335 				th = (void *)(mtod(m0, char *) + hlen);
   7336 
   7337 				ip6->ip6_plen = 0;
   7338 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7339 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7340 			}
   7341 			hlen += th->th_off << 2;
   7342 		}
   7343 
   7344 		if (v4) {
   7345 			WM_Q_EVCNT_INCR(txq, tso);
   7346 			cmdlen |= WTX_TCPIP_CMD_IP;
   7347 		} else {
   7348 			WM_Q_EVCNT_INCR(txq, tso6);
   7349 			ipcse = 0;
   7350 		}
   7351 		cmd |= WTX_TCPIP_CMD_TSE;
   7352 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7353 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7354 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7355 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7356 	}
   7357 
   7358 	/*
   7359 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7360 	 * offload feature, if we load the context descriptor, we
   7361 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7362 	 */
   7363 
   7364 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7365 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7366 	    WTX_TCPIP_IPCSE(ipcse);
   7367 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7368 		WM_Q_EVCNT_INCR(txq, ipsum);
   7369 		fields |= WTX_IXSM;
   7370 	}
   7371 
   7372 	offset += iphl;
   7373 
   7374 	if (m0->m_pkthdr.csum_flags &
   7375 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7376 		WM_Q_EVCNT_INCR(txq, tusum);
   7377 		fields |= WTX_TXSM;
   7378 		tucs = WTX_TCPIP_TUCSS(offset) |
   7379 		    WTX_TCPIP_TUCSO(offset +
   7380 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7381 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7382 	} else if ((m0->m_pkthdr.csum_flags &
   7383 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7384 		WM_Q_EVCNT_INCR(txq, tusum6);
   7385 		fields |= WTX_TXSM;
   7386 		tucs = WTX_TCPIP_TUCSS(offset) |
   7387 		    WTX_TCPIP_TUCSO(offset +
   7388 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7389 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7390 	} else {
   7391 		/* Just initialize it to a valid TCP context. */
   7392 		tucs = WTX_TCPIP_TUCSS(offset) |
   7393 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7394 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7395 	}
   7396 
   7397 	/*
    7398 	 * We don't have to write a context descriptor for every packet,
    7399 	 * except on the 82574: there, a context descriptor must be written
    7400 	 * for every packet when two descriptor queues are used.
    7401 	 * Writing a context descriptor for every packet adds some overhead,
    7402 	 * but it does not cause problems.
   7403 	 */
   7404 	/* Fill in the context descriptor. */
   7405 	t = (struct livengood_tcpip_ctxdesc *)
   7406 	    &txq->txq_descs[txq->txq_next];
   7407 	t->tcpip_ipcs = htole32(ipcs);
   7408 	t->tcpip_tucs = htole32(tucs);
   7409 	t->tcpip_cmdlen = htole32(cmdlen);
   7410 	t->tcpip_seg = htole32(seg);
   7411 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7412 
   7413 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7414 	txs->txs_ndesc++;
   7415 
   7416 	*cmdp = cmd;
   7417 	*fieldsp = fields;
   7418 
   7419 	return 0;
   7420 }
   7421 
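         /*
          * wm_select_txqueue:
          *
          *	Map the current CPU to a Tx queue index.
          */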
   7422 static inline int
   7423 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7424 {
   7425 	struct wm_softc *sc = ifp->if_softc;
   7426 	u_int cpuid = cpu_index(curcpu());
   7427 
   7428 	/*
    7429 	 * Currently, a simple distribution strategy.
    7430 	 * TODO:
    7431 	 * Distribute by flowid (RSS hash value).
   7432 	 */
   7433 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7434 }
   7435 
   7436 /*
   7437  * wm_start:		[ifnet interface function]
   7438  *
   7439  *	Start packet transmission on the interface.
   7440  */
   7441 static void
   7442 wm_start(struct ifnet *ifp)
   7443 {
   7444 	struct wm_softc *sc = ifp->if_softc;
   7445 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7446 
   7447 #ifdef WM_MPSAFE
   7448 	KASSERT(if_is_mpsafe(ifp));
   7449 #endif
   7450 	/*
   7451 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7452 	 */
   7453 
   7454 	mutex_enter(txq->txq_lock);
   7455 	if (!txq->txq_stopping)
   7456 		wm_start_locked(ifp);
   7457 	mutex_exit(txq->txq_lock);
   7458 }
   7459 
   7460 static void
   7461 wm_start_locked(struct ifnet *ifp)
   7462 {
   7463 	struct wm_softc *sc = ifp->if_softc;
   7464 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7465 
   7466 	wm_send_common_locked(ifp, txq, false);
   7467 }
   7468 
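         /*
          * wm_transmit:	[ifnet interface function]
          *
          *	Enqueue a packet on the Tx queue selected by
          *	wm_select_txqueue() and kick that queue if its lock is
          *	uncontended.
          */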
   7469 static int
   7470 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7471 {
   7472 	int qid;
   7473 	struct wm_softc *sc = ifp->if_softc;
   7474 	struct wm_txqueue *txq;
   7475 
   7476 	qid = wm_select_txqueue(ifp, m);
   7477 	txq = &sc->sc_queue[qid].wmq_txq;
   7478 
   7479 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7480 		m_freem(m);
   7481 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7482 		return ENOBUFS;
   7483 	}
   7484 
   7485 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   7486 	ifp->if_obytes += m->m_pkthdr.len;
   7487 	if (m->m_flags & M_MCAST)
   7488 		ifp->if_omcasts++;
   7489 
   7490 	if (mutex_tryenter(txq->txq_lock)) {
   7491 		if (!txq->txq_stopping)
   7492 			wm_transmit_locked(ifp, txq);
   7493 		mutex_exit(txq->txq_lock);
   7494 	}
   7495 
   7496 	return 0;
   7497 }
   7498 
   7499 static void
   7500 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7501 {
   7502 
   7503 	wm_send_common_locked(ifp, txq, true);
   7504 }
   7505 
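         /*
          * wm_send_common_locked:
          *
          *	Common transmit path for wm_start_locked() and
          *	wm_transmit_locked(): dequeue packets, load their DMA maps
          *	and fill Tx descriptors until the source queue or the
          *	descriptor ring is exhausted.  The Tx queue lock must be
          *	held.
          */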
   7506 static void
   7507 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7508     bool is_transmit)
   7509 {
   7510 	struct wm_softc *sc = ifp->if_softc;
   7511 	struct mbuf *m0;
   7512 	struct wm_txsoft *txs;
   7513 	bus_dmamap_t dmamap;
   7514 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7515 	bus_addr_t curaddr;
   7516 	bus_size_t seglen, curlen;
   7517 	uint32_t cksumcmd;
   7518 	uint8_t cksumfields;
   7519 	bool remap = true;
   7520 
   7521 	KASSERT(mutex_owned(txq->txq_lock));
   7522 
   7523 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7524 		return;
   7525 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7526 		return;
   7527 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7528 		return;
   7529 
   7530 	/* Remember the previous number of free descriptors. */
   7531 	ofree = txq->txq_free;
   7532 
   7533 	/*
   7534 	 * Loop through the send queue, setting up transmit descriptors
   7535 	 * until we drain the queue, or use up all available transmit
   7536 	 * descriptors.
   7537 	 */
   7538 	for (;;) {
   7539 		m0 = NULL;
   7540 
   7541 		/* Get a work queue entry. */
   7542 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7543 			wm_txeof(txq, UINT_MAX);
   7544 			if (txq->txq_sfree == 0) {
   7545 				DPRINTF(WM_DEBUG_TX,
   7546 				    ("%s: TX: no free job descriptors\n",
   7547 					device_xname(sc->sc_dev)));
   7548 				WM_Q_EVCNT_INCR(txq, txsstall);
   7549 				break;
   7550 			}
   7551 		}
   7552 
   7553 		/* Grab a packet off the queue. */
   7554 		if (is_transmit)
   7555 			m0 = pcq_get(txq->txq_interq);
   7556 		else
   7557 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7558 		if (m0 == NULL)
   7559 			break;
   7560 
   7561 		DPRINTF(WM_DEBUG_TX,
   7562 		    ("%s: TX: have packet to transmit: %p\n",
   7563 			device_xname(sc->sc_dev), m0));
   7564 
   7565 		txs = &txq->txq_soft[txq->txq_snext];
   7566 		dmamap = txs->txs_dmamap;
   7567 
   7568 		use_tso = (m0->m_pkthdr.csum_flags &
   7569 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7570 
   7571 		/*
   7572 		 * So says the Linux driver:
   7573 		 * The controller does a simple calculation to make sure
   7574 		 * there is enough room in the FIFO before initiating the
   7575 		 * DMA for each buffer. The calc is:
   7576 		 *	4 = ceil(buffer len / MSS)
   7577 		 * To make sure we don't overrun the FIFO, adjust the max
   7578 		 * buffer len if the MSS drops.
   7579 		 */
   7580 		dmamap->dm_maxsegsz =
   7581 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7582 		    ? m0->m_pkthdr.segsz << 2
   7583 		    : WTX_MAX_LEN;
   7584 
   7585 		/*
   7586 		 * Load the DMA map.  If this fails, the packet either
   7587 		 * didn't fit in the allotted number of segments, or we
   7588 		 * were short on resources.  For the too-many-segments
   7589 		 * case, we simply report an error and drop the packet,
   7590 		 * since we can't sanely copy a jumbo packet to a single
   7591 		 * buffer.
   7592 		 */
   7593 retry:
   7594 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7595 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7596 		if (__predict_false(error)) {
   7597 			if (error == EFBIG) {
   7598 				if (remap == true) {
   7599 					struct mbuf *m;
   7600 
   7601 					remap = false;
   7602 					m = m_defrag(m0, M_NOWAIT);
   7603 					if (m != NULL) {
   7604 						WM_Q_EVCNT_INCR(txq, defrag);
   7605 						m0 = m;
   7606 						goto retry;
   7607 					}
   7608 				}
   7609 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7610 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7611 				    "DMA segments, dropping...\n",
   7612 				    device_xname(sc->sc_dev));
   7613 				wm_dump_mbuf_chain(sc, m0);
   7614 				m_freem(m0);
   7615 				continue;
   7616 			}
   7617 			/* Short on resources, just stop for now. */
   7618 			DPRINTF(WM_DEBUG_TX,
   7619 			    ("%s: TX: dmamap load failed: %d\n",
   7620 				device_xname(sc->sc_dev), error));
   7621 			break;
   7622 		}
   7623 
   7624 		segs_needed = dmamap->dm_nsegs;
   7625 		if (use_tso) {
   7626 			/* For sentinel descriptor; see below. */
   7627 			segs_needed++;
   7628 		}
   7629 
   7630 		/*
   7631 		 * Ensure we have enough descriptors free to describe
   7632 		 * the packet. Note, we always reserve one descriptor
   7633 		 * at the end of the ring due to the semantics of the
   7634 		 * TDT register, plus one more in the event we need
   7635 		 * to load offload context.
   7636 		 */
   7637 		if (segs_needed > txq->txq_free - 2) {
   7638 			/*
   7639 			 * Not enough free descriptors to transmit this
   7640 			 * packet.  We haven't committed anything yet,
   7641 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
   7643 			 * layer that there are no more slots left.
   7644 			 */
   7645 			DPRINTF(WM_DEBUG_TX,
   7646 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7647 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7648 				segs_needed, txq->txq_free - 1));
   7649 			if (!is_transmit)
   7650 				ifp->if_flags |= IFF_OACTIVE;
   7651 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7652 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7653 			WM_Q_EVCNT_INCR(txq, txdstall);
   7654 			break;
   7655 		}
   7656 
   7657 		/*
   7658 		 * Check for 82547 Tx FIFO bug. We need to do this
   7659 		 * once we know we can transmit the packet, since we
   7660 		 * do some internal FIFO space accounting here.
   7661 		 */
   7662 		if (sc->sc_type == WM_T_82547 &&
   7663 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7664 			DPRINTF(WM_DEBUG_TX,
   7665 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7666 				device_xname(sc->sc_dev)));
   7667 			if (!is_transmit)
   7668 				ifp->if_flags |= IFF_OACTIVE;
   7669 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7670 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7671 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7672 			break;
   7673 		}
   7674 
   7675 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7676 
   7677 		DPRINTF(WM_DEBUG_TX,
   7678 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7679 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7680 
   7681 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7682 
   7683 		/*
   7684 		 * Store a pointer to the packet so that we can free it
   7685 		 * later.
   7686 		 *
   7687 		 * Initially, we consider the number of descriptors the
   7688 		 * packet uses the number of DMA segments.  This may be
   7689 		 * incremented by 1 if we do checksum offload (a descriptor
   7690 		 * is used to set the checksum context).
   7691 		 */
   7692 		txs->txs_mbuf = m0;
   7693 		txs->txs_firstdesc = txq->txq_next;
   7694 		txs->txs_ndesc = segs_needed;
   7695 
   7696 		/* Set up offload parameters for this packet. */
   7697 		if (m0->m_pkthdr.csum_flags &
   7698 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7699 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7700 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7701 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7702 					  &cksumfields) != 0) {
   7703 				/* Error message already displayed. */
   7704 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7705 				continue;
   7706 			}
   7707 		} else {
   7708 			cksumcmd = 0;
   7709 			cksumfields = 0;
   7710 		}
   7711 
   7712 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7713 
   7714 		/* Sync the DMA map. */
   7715 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7716 		    BUS_DMASYNC_PREWRITE);
   7717 
   7718 		/* Initialize the transmit descriptor. */
   7719 		for (nexttx = txq->txq_next, seg = 0;
   7720 		     seg < dmamap->dm_nsegs; seg++) {
   7721 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7722 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7723 			     seglen != 0;
   7724 			     curaddr += curlen, seglen -= curlen,
   7725 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7726 				curlen = seglen;
   7727 
   7728 				/*
   7729 				 * So says the Linux driver:
   7730 				 * Work around for premature descriptor
   7731 				 * write-backs in TSO mode.  Append a
   7732 				 * 4-byte sentinel descriptor.
   7733 				 */
   7734 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7735 				    curlen > 8)
   7736 					curlen -= 4;
   7737 
   7738 				wm_set_dma_addr(
   7739 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7740 				txq->txq_descs[nexttx].wtx_cmdlen
   7741 				    = htole32(cksumcmd | curlen);
   7742 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7743 				    = 0;
   7744 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7745 				    = cksumfields;
    7746 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7747 				lasttx = nexttx;
   7748 
   7749 				DPRINTF(WM_DEBUG_TX,
   7750 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7751 					"len %#04zx\n",
   7752 					device_xname(sc->sc_dev), nexttx,
   7753 					(uint64_t)curaddr, curlen));
   7754 			}
   7755 		}
   7756 
   7757 		KASSERT(lasttx != -1);
   7758 
   7759 		/*
   7760 		 * Set up the command byte on the last descriptor of
   7761 		 * the packet. If we're in the interrupt delay window,
   7762 		 * delay the interrupt.
   7763 		 */
   7764 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7765 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7766 
   7767 		/*
   7768 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7769 		 * up the descriptor to encapsulate the packet for us.
   7770 		 *
   7771 		 * This is only valid on the last descriptor of the packet.
   7772 		 */
   7773 		if (vlan_has_tag(m0)) {
   7774 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7775 			    htole32(WTX_CMD_VLE);
   7776 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7777 			    = htole16(vlan_get_tag(m0));
   7778 		}
   7779 
   7780 		txs->txs_lastdesc = lasttx;
   7781 
   7782 		DPRINTF(WM_DEBUG_TX,
   7783 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7784 			device_xname(sc->sc_dev),
   7785 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7786 
   7787 		/* Sync the descriptors we're using. */
   7788 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7789 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7790 
   7791 		/* Give the packet to the chip. */
   7792 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7793 
   7794 		DPRINTF(WM_DEBUG_TX,
   7795 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7796 
   7797 		DPRINTF(WM_DEBUG_TX,
   7798 		    ("%s: TX: finished transmitting packet, job %d\n",
   7799 			device_xname(sc->sc_dev), txq->txq_snext));
   7800 
   7801 		/* Advance the tx pointer. */
   7802 		txq->txq_free -= txs->txs_ndesc;
   7803 		txq->txq_next = nexttx;
   7804 
   7805 		txq->txq_sfree--;
   7806 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7807 
   7808 		/* Pass the packet to any BPF listeners. */
   7809 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7810 	}
   7811 
   7812 	if (m0 != NULL) {
   7813 		if (!is_transmit)
   7814 			ifp->if_flags |= IFF_OACTIVE;
   7815 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7816 		WM_Q_EVCNT_INCR(txq, descdrop);
   7817 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7818 			__func__));
   7819 		m_freem(m0);
   7820 	}
   7821 
   7822 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7823 		/* No more slots; notify upper layer. */
   7824 		if (!is_transmit)
   7825 			ifp->if_flags |= IFF_OACTIVE;
   7826 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7827 	}
   7828 
   7829 	if (txq->txq_free != ofree) {
   7830 		/* Set a watchdog timer in case the chip flakes out. */
   7831 		txq->txq_lastsent = time_uptime;
   7832 		txq->txq_sending = true;
   7833 	}
   7834 }
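
/*
 * Editor's note: a minimal sketch (not driver code) of the watchdog
 * arming done just above.  The send path only records *when* it last
 * handed work to the chip; a periodic watchdog tick elsewhere in the
 * driver can then declare a hang if txq_sending is still true long
 * after txq_lastsent.  The timeout name below is hypothetical.
 */
#if 0
	if (txq->txq_free != ofree) {		/* we queued something */
		txq->txq_lastsent = time_uptime;
		txq->txq_sending = true;
	}
	/* ... later, from a watchdog tick: */
	if (txq->txq_sending &&
	    (time_uptime - txq->txq_lastsent) > wm_tx_timeout)
		/* assume the chip hung; reset the interface */;
#endif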
   7835 
   7836 /*
   7837  * wm_nq_tx_offload:
   7838  *
   7839  *	Set up TCP/IP checksumming parameters for the
   7840  *	specified packet, for NEWQUEUE devices
   7841  */
   7842 static int
   7843 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7844     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7845 {
   7846 	struct mbuf *m0 = txs->txs_mbuf;
   7847 	uint32_t vl_len, mssidx, cmdc;
   7848 	struct ether_header *eh;
   7849 	int offset, iphl;
   7850 
   7851 	/*
   7852 	 * XXX It would be nice if the mbuf pkthdr had offset
   7853 	 * fields for the protocol headers.
   7854 	 */
   7855 	*cmdlenp = 0;
   7856 	*fieldsp = 0;
   7857 
   7858 	eh = mtod(m0, struct ether_header *);
   7859 	switch (htons(eh->ether_type)) {
   7860 	case ETHERTYPE_IP:
   7861 	case ETHERTYPE_IPV6:
   7862 		offset = ETHER_HDR_LEN;
   7863 		break;
   7864 
   7865 	case ETHERTYPE_VLAN:
   7866 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7867 		break;
   7868 
   7869 	default:
   7870 		/* Don't support this protocol or encapsulation. */
   7871 		*do_csum = false;
   7872 		return 0;
   7873 	}
   7874 	*do_csum = true;
   7875 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7876 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7877 
   7878 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7879 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7880 
   7881 	if ((m0->m_pkthdr.csum_flags &
   7882 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7883 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7884 	} else {
   7885 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7886 	}
   7887 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7888 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7889 
   7890 	if (vlan_has_tag(m0)) {
   7891 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7892 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7893 		*cmdlenp |= NQTX_CMD_VLE;
   7894 	}
   7895 
   7896 	mssidx = 0;
   7897 
   7898 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7899 		int hlen = offset + iphl;
   7900 		int tcp_hlen;
   7901 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7902 
   7903 		if (__predict_false(m0->m_len <
   7904 				    (hlen + sizeof(struct tcphdr)))) {
   7905 			/*
   7906 			 * TCP/IP headers are not in the first mbuf; we need
   7907 			 * to do this the slow and painful way. Let's just
   7908 			 * hope this doesn't happen very often.
   7909 			 */
   7910 			struct tcphdr th;
   7911 
   7912 			WM_Q_EVCNT_INCR(txq, tsopain);
   7913 
   7914 			m_copydata(m0, hlen, sizeof(th), &th);
   7915 			if (v4) {
   7916 				struct ip ip;
   7917 
   7918 				m_copydata(m0, offset, sizeof(ip), &ip);
   7919 				ip.ip_len = 0;
   7920 				m_copyback(m0,
   7921 				    offset + offsetof(struct ip, ip_len),
   7922 				    sizeof(ip.ip_len), &ip.ip_len);
   7923 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7924 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7925 			} else {
   7926 				struct ip6_hdr ip6;
   7927 
   7928 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7929 				ip6.ip6_plen = 0;
   7930 				m_copyback(m0,
   7931 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7932 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7933 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7934 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7935 			}
   7936 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7937 			    sizeof(th.th_sum), &th.th_sum);
   7938 
   7939 			tcp_hlen = th.th_off << 2;
   7940 		} else {
   7941 			/*
   7942 			 * TCP/IP headers are in the first mbuf; we can do
   7943 			 * this the easy way.
   7944 			 */
   7945 			struct tcphdr *th;
   7946 
   7947 			if (v4) {
   7948 				struct ip *ip =
   7949 				    (void *)(mtod(m0, char *) + offset);
   7950 				th = (void *)(mtod(m0, char *) + hlen);
   7951 
   7952 				ip->ip_len = 0;
   7953 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7954 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7955 			} else {
   7956 				struct ip6_hdr *ip6 =
   7957 				    (void *)(mtod(m0, char *) + offset);
   7958 				th = (void *)(mtod(m0, char *) + hlen);
   7959 
   7960 				ip6->ip6_plen = 0;
   7961 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7962 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7963 			}
   7964 			tcp_hlen = th->th_off << 2;
   7965 		}
   7966 		hlen += tcp_hlen;
   7967 		*cmdlenp |= NQTX_CMD_TSE;
   7968 
   7969 		if (v4) {
   7970 			WM_Q_EVCNT_INCR(txq, tso);
   7971 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7972 		} else {
   7973 			WM_Q_EVCNT_INCR(txq, tso6);
   7974 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7975 		}
   7976 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7977 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7978 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7979 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7980 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7981 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7982 	} else {
   7983 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7984 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7985 	}
   7986 
   7987 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7988 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7989 		cmdc |= NQTXC_CMD_IP4;
   7990 	}
   7991 
   7992 	if (m0->m_pkthdr.csum_flags &
   7993 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7994 		WM_Q_EVCNT_INCR(txq, tusum);
   7995 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7996 			cmdc |= NQTXC_CMD_TCP;
   7997 		else
   7998 			cmdc |= NQTXC_CMD_UDP;
   7999 
   8000 		cmdc |= NQTXC_CMD_IP4;
   8001 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8002 	}
   8003 	if (m0->m_pkthdr.csum_flags &
   8004 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8005 		WM_Q_EVCNT_INCR(txq, tusum6);
   8006 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8007 			cmdc |= NQTXC_CMD_TCP;
   8008 		else
   8009 			cmdc |= NQTXC_CMD_UDP;
   8010 
   8011 		cmdc |= NQTXC_CMD_IP6;
   8012 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8013 	}
   8014 
   8015 	/*
   8016 	 * We don't have to write a context descriptor for every packet on
   8017 	 * NEWQUEUE controllers, i.e. 82575, 82576, 82580, I350, I354,
   8018 	 * I210 and I211; writing one per Tx queue is enough for these
   8019 	 * controllers.
   8020 	 * Writing a context descriptor for every packet adds overhead,
   8021 	 * but it does not cause problems.
   8022 	 */
   8023 	/* Fill in the context descriptor. */
   8024 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8025 	    htole32(vl_len);
   8026 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8027 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8028 	    htole32(cmdc);
   8029 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8030 	    htole32(mssidx);
   8031 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8032 	DPRINTF(WM_DEBUG_TX,
   8033 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8034 		txq->txq_next, 0, vl_len));
   8035 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8036 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8037 	txs->txs_ndesc++;
   8038 	return 0;
   8039 }
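
/*
 * Editor's note: per the comment near the end of wm_nq_tx_offload(),
 * NEWQUEUE controllers only need one context descriptor per Tx queue.
 * A hedged sketch of how a driver could exploit that by caching the
 * last context written; the txq_last_* fields are hypothetical and
 * not part of this driver.
 */
#if 0
	if (vl_len != txq->txq_last_vl_len ||
	    cmdc != txq->txq_last_cmdc ||
	    mssidx != txq->txq_last_mssidx) {
		/* ... write the context descriptor as above ... */
		txq->txq_last_vl_len = vl_len;
		txq->txq_last_cmdc = cmdc;
		txq->txq_last_mssidx = mssidx;
		txs->txs_ndesc++;
	}
#endif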
   8040 
   8041 /*
   8042  * wm_nq_start:		[ifnet interface function]
   8043  *
   8044  *	Start packet transmission on the interface for NEWQUEUE devices
   8045  */
   8046 static void
   8047 wm_nq_start(struct ifnet *ifp)
   8048 {
   8049 	struct wm_softc *sc = ifp->if_softc;
   8050 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8051 
   8052 #ifdef WM_MPSAFE
   8053 	KASSERT(if_is_mpsafe(ifp));
   8054 #endif
   8055 	/*
   8056 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8057 	 */
   8058 
   8059 	mutex_enter(txq->txq_lock);
   8060 	if (!txq->txq_stopping)
   8061 		wm_nq_start_locked(ifp);
   8062 	mutex_exit(txq->txq_lock);
   8063 }
   8064 
   8065 static void
   8066 wm_nq_start_locked(struct ifnet *ifp)
   8067 {
   8068 	struct wm_softc *sc = ifp->if_softc;
   8069 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8070 
   8071 	wm_nq_send_common_locked(ifp, txq, false);
   8072 }
   8073 
   8074 static int
   8075 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8076 {
   8077 	int qid;
   8078 	struct wm_softc *sc = ifp->if_softc;
   8079 	struct wm_txqueue *txq;
   8080 
   8081 	qid = wm_select_txqueue(ifp, m);
   8082 	txq = &sc->sc_queue[qid].wmq_txq;
   8083 
   8084 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8085 		m_freem(m);
   8086 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8087 		return ENOBUFS;
   8088 	}
   8089 
   8090 	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
   8091 	ifp->if_obytes += m->m_pkthdr.len;
   8092 	if (m->m_flags & M_MCAST)
   8093 		ifp->if_omcasts++;
   8094 
   8095 	/*
   8096 	 * This mutex_tryenter() can fail at run time in two situations:
   8097 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
   8098 	 *     (2) contention with the deferred if_start softint
   8099 	 *         (wm_handle_queue())
   8100 	 * In case (1), the last packet enqueued to txq->txq_interq is
   8101 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
   8102 	 * In case (2), the last packet enqueued to txq->txq_interq is also
   8103 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck
   8104 	 * either.
   8105 	 */
   8106 	if (mutex_tryenter(txq->txq_lock)) {
   8107 		if (!txq->txq_stopping)
   8108 			wm_nq_transmit_locked(ifp, txq);
   8109 		mutex_exit(txq->txq_lock);
   8110 	}
   8111 
   8112 	return 0;
   8113 }
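
/*
 * Editor's note: the hand-off described in the comment above, reduced
 * to its core.  Work is published to the pcq first; the lock is then
 * only *tried*, because any current holder is already inside the send
 * loop and will also drain what was just queued.  The queue q, lock
 * and drain() here are a generic sketch, not this driver's code.
 */
#if 0
	pcq_put(q, m);			/* publish the work first */
	if (mutex_tryenter(lock)) {	/* then try to become the drainer */
		drain(q);
		mutex_exit(lock);
	}
	/* else: the current lock holder drains q for us */
#endif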
   8114 
   8115 static void
   8116 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8117 {
   8118 
   8119 	wm_nq_send_common_locked(ifp, txq, true);
   8120 }
   8121 
   8122 static void
   8123 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8124     bool is_transmit)
   8125 {
   8126 	struct wm_softc *sc = ifp->if_softc;
   8127 	struct mbuf *m0;
   8128 	struct wm_txsoft *txs;
   8129 	bus_dmamap_t dmamap;
   8130 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8131 	bool do_csum, sent;
   8132 	bool remap = true;
   8133 
   8134 	KASSERT(mutex_owned(txq->txq_lock));
   8135 
   8136 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8137 		return;
   8138 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8139 		return;
   8140 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8141 		return;
   8142 
   8143 	sent = false;
   8144 
   8145 	/*
   8146 	 * Loop through the send queue, setting up transmit descriptors
   8147 	 * until we drain the queue, or use up all available transmit
   8148 	 * descriptors.
   8149 	 */
   8150 	for (;;) {
   8151 		m0 = NULL;
   8152 
   8153 		/* Get a work queue entry. */
   8154 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8155 			wm_txeof(txq, UINT_MAX);
   8156 			if (txq->txq_sfree == 0) {
   8157 				DPRINTF(WM_DEBUG_TX,
   8158 				    ("%s: TX: no free job descriptors\n",
   8159 					device_xname(sc->sc_dev)));
   8160 				WM_Q_EVCNT_INCR(txq, txsstall);
   8161 				break;
   8162 			}
   8163 		}
   8164 
   8165 		/* Grab a packet off the queue. */
   8166 		if (is_transmit)
   8167 			m0 = pcq_get(txq->txq_interq);
   8168 		else
   8169 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8170 		if (m0 == NULL)
   8171 			break;
   8172 
   8173 		DPRINTF(WM_DEBUG_TX,
   8174 		    ("%s: TX: have packet to transmit: %p\n",
   8175 		    device_xname(sc->sc_dev), m0));
   8176 
   8177 		txs = &txq->txq_soft[txq->txq_snext];
   8178 		dmamap = txs->txs_dmamap;
   8179 
   8180 		/*
   8181 		 * Load the DMA map.  If this fails, the packet either
   8182 		 * didn't fit in the allotted number of segments, or we
   8183 		 * were short on resources.  For the too-many-segments
   8184 		 * case, we simply report an error and drop the packet,
   8185 		 * since we can't sanely copy a jumbo packet to a single
   8186 		 * buffer.
   8187 		 */
   8188 retry:
   8189 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8190 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8191 		if (__predict_false(error)) {
   8192 			if (error == EFBIG) {
   8193 				if (remap == true) {
   8194 					struct mbuf *m;
   8195 
   8196 					remap = false;
   8197 					m = m_defrag(m0, M_NOWAIT);
   8198 					if (m != NULL) {
   8199 						WM_Q_EVCNT_INCR(txq, defrag);
   8200 						m0 = m;
   8201 						goto retry;
   8202 					}
   8203 				}
   8204 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8205 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8206 				    "DMA segments, dropping...\n",
   8207 				    device_xname(sc->sc_dev));
   8208 				wm_dump_mbuf_chain(sc, m0);
   8209 				m_freem(m0);
   8210 				continue;
   8211 			}
   8212 			/* Short on resources, just stop for now. */
   8213 			DPRINTF(WM_DEBUG_TX,
   8214 			    ("%s: TX: dmamap load failed: %d\n",
   8215 				device_xname(sc->sc_dev), error));
   8216 			break;
   8217 		}
   8218 
   8219 		segs_needed = dmamap->dm_nsegs;
   8220 
   8221 		/*
   8222 		 * Ensure we have enough descriptors free to describe
   8223 		 * the packet. Note, we always reserve one descriptor
   8224 		 * at the end of the ring due to the semantics of the
   8225 		 * TDT register, plus one more in the event we need
   8226 		 * to load offload context.
   8227 		 */
   8228 		if (segs_needed > txq->txq_free - 2) {
   8229 			/*
   8230 			 * Not enough free descriptors to transmit this
   8231 			 * packet.  We haven't committed anything yet,
   8232 			 * so just unload the DMA map, put the packet
   8233 			 * pack on the queue, and punt. Notify the upper
   8234 			 * back on the queue, and punt. Notify the upper
   8235 			 */
   8236 			DPRINTF(WM_DEBUG_TX,
   8237 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8238 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8239 				segs_needed, txq->txq_free - 1));
   8240 			if (!is_transmit)
   8241 				ifp->if_flags |= IFF_OACTIVE;
   8242 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8243 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8244 			WM_Q_EVCNT_INCR(txq, txdstall);
   8245 			break;
   8246 		}
   8247 
   8248 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8249 
   8250 		DPRINTF(WM_DEBUG_TX,
   8251 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8252 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8253 
   8254 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8255 
   8256 		/*
   8257 		 * Store a pointer to the packet so that we can free it
   8258 		 * later.
   8259 		 *
   8260 		 * Initially, we consider the number of descriptors the
   8261 		 * packet uses to be the number of DMA segments.  This may be
   8262 		 * incremented by 1 if we do checksum offload (a descriptor
   8263 		 * is used to set the checksum context).
   8264 		 */
   8265 		txs->txs_mbuf = m0;
   8266 		txs->txs_firstdesc = txq->txq_next;
   8267 		txs->txs_ndesc = segs_needed;
   8268 
   8269 		/* Set up offload parameters for this packet. */
   8270 		uint32_t cmdlen, fields, dcmdlen;
   8271 		if (m0->m_pkthdr.csum_flags &
   8272 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8273 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8274 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8275 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8276 			    &do_csum) != 0) {
   8277 				/* Error message already displayed. */
   8278 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8279 				continue;
   8280 			}
   8281 		} else {
   8282 			do_csum = false;
   8283 			cmdlen = 0;
   8284 			fields = 0;
   8285 		}
   8286 
   8287 		/* Sync the DMA map. */
   8288 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8289 		    BUS_DMASYNC_PREWRITE);
   8290 
   8291 		/* Initialize the first transmit descriptor. */
   8292 		nexttx = txq->txq_next;
   8293 		if (!do_csum) {
   8294 			/* Setup a legacy descriptor */
   8295 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8296 			    dmamap->dm_segs[0].ds_addr);
   8297 			txq->txq_descs[nexttx].wtx_cmdlen =
   8298 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8299 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8300 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8301 			if (vlan_has_tag(m0)) {
   8302 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8303 				    htole32(WTX_CMD_VLE);
   8304 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8305 				    htole16(vlan_get_tag(m0));
   8306 			} else
   8307 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8308 
   8309 			dcmdlen = 0;
   8310 		} else {
   8311 			/* Setup an advanced data descriptor */
   8312 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8313 			    htole64(dmamap->dm_segs[0].ds_addr);
   8314 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8315 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8316 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8317 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8318 			    htole32(fields);
   8319 			DPRINTF(WM_DEBUG_TX,
   8320 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8321 				device_xname(sc->sc_dev), nexttx,
   8322 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8323 			DPRINTF(WM_DEBUG_TX,
   8324 			    ("\t 0x%08x%08x\n", fields,
   8325 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8326 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8327 		}
   8328 
   8329 		lasttx = nexttx;
   8330 		nexttx = WM_NEXTTX(txq, nexttx);
   8331 		/*
   8332 		 * Fill in the remaining descriptors; the legacy and
   8333 		 * advanced formats are the same from here on.
   8334 		 */
   8335 		for (seg = 1; seg < dmamap->dm_nsegs;
   8336 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8337 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8338 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8339 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8340 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8341 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8342 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8343 			lasttx = nexttx;
   8344 
   8345 			DPRINTF(WM_DEBUG_TX,
   8346 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8347 				device_xname(sc->sc_dev), nexttx,
   8348 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8349 				dmamap->dm_segs[seg].ds_len));
   8350 		}
   8351 
   8352 		KASSERT(lasttx != -1);
   8353 
   8354 		/*
   8355 		 * Set up the command byte on the last descriptor of
   8356 		 * the packet. If we're in the interrupt delay window,
   8357 		 * delay the interrupt.
   8358 		 */
   8359 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8360 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8361 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8362 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8363 
   8364 		txs->txs_lastdesc = lasttx;
   8365 
   8366 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8367 		    device_xname(sc->sc_dev),
   8368 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8369 
   8370 		/* Sync the descriptors we're using. */
   8371 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8372 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8373 
   8374 		/* Give the packet to the chip. */
   8375 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8376 		sent = true;
   8377 
   8378 		DPRINTF(WM_DEBUG_TX,
   8379 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8380 
   8381 		DPRINTF(WM_DEBUG_TX,
   8382 		    ("%s: TX: finished transmitting packet, job %d\n",
   8383 			device_xname(sc->sc_dev), txq->txq_snext));
   8384 
   8385 		/* Advance the tx pointer. */
   8386 		txq->txq_free -= txs->txs_ndesc;
   8387 		txq->txq_next = nexttx;
   8388 
   8389 		txq->txq_sfree--;
   8390 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8391 
   8392 		/* Pass the packet to any BPF listeners. */
   8393 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8394 	}
   8395 
   8396 	if (m0 != NULL) {
   8397 		if (!is_transmit)
   8398 			ifp->if_flags |= IFF_OACTIVE;
   8399 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8400 		WM_Q_EVCNT_INCR(txq, descdrop);
   8401 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8402 			__func__));
   8403 		m_freem(m0);
   8404 	}
   8405 
   8406 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8407 		/* No more slots; notify upper layer. */
   8408 		if (!is_transmit)
   8409 			ifp->if_flags |= IFF_OACTIVE;
   8410 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8411 	}
   8412 
   8413 	if (sent) {
   8414 		/* Set a watchdog timer in case the chip flakes out. */
   8415 		txq->txq_lastsent = time_uptime;
   8416 		txq->txq_sending = true;
   8417 	}
   8418 }
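
/*
 * Editor's note: why the send loops above keep "txq_free - 2" slots in
 * reserve.  TDT (the tail register) is written with the index one past
 * the last descriptor handed to the chip; if every slot were consumed,
 * tail would catch up with head and a completely full ring would be
 * indistinguishable from an empty one.  One descriptor therefore
 * always stays free, plus one spare for a possible context descriptor:
 *
 *	if (segs_needed > txq->txq_free - 2)
 *		... stall and wait for wm_txeof() to reclaim slots ...
 */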
   8419 
   8420 static void
   8421 wm_deferred_start_locked(struct wm_txqueue *txq)
   8422 {
   8423 	struct wm_softc *sc = txq->txq_sc;
   8424 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8425 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8426 	int qid = wmq->wmq_id;
   8427 
   8428 	KASSERT(mutex_owned(txq->txq_lock));
   8429 
   8430 	if (txq->txq_stopping) {
   8431 		mutex_exit(txq->txq_lock);
   8432 		return;
   8433 	}
   8434 
   8435 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8436 		/* XXX needed for ALTQ or single-CPU systems */
   8437 		if (qid == 0)
   8438 			wm_nq_start_locked(ifp);
   8439 		wm_nq_transmit_locked(ifp, txq);
   8440 	} else {
   8441 		/* XXX needed for ALTQ or single-CPU systems */
   8442 		if (qid == 0)
   8443 			wm_start_locked(ifp);
   8444 		wm_transmit_locked(ifp, txq);
   8445 	}
   8446 }
   8447 
   8448 /* Interrupt */
   8449 
   8450 /*
   8451  * wm_txeof:
   8452  *
   8453  *	Helper; handle transmit interrupts.
   8454  */
   8455 static bool
   8456 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8457 {
   8458 	struct wm_softc *sc = txq->txq_sc;
   8459 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8460 	struct wm_txsoft *txs;
   8461 	int count = 0;
   8462 	int i;
   8463 	uint8_t status;
   8464 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8465 	bool more = false;
   8466 
   8467 	KASSERT(mutex_owned(txq->txq_lock));
   8468 
   8469 	if (txq->txq_stopping)
   8470 		return false;
   8471 
   8472 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8473 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8474 	if (wmq->wmq_id == 0)
   8475 		ifp->if_flags &= ~IFF_OACTIVE;
   8476 
   8477 	/*
   8478 	 * Go through the Tx list and free mbufs for those
   8479 	 * frames which have been transmitted.
   8480 	 */
   8481 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8482 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8483 		if (limit-- == 0) {
   8484 			more = true;
   8485 			DPRINTF(WM_DEBUG_TX,
   8486 			    ("%s: TX: loop limited, job %d is not processed\n",
   8487 				device_xname(sc->sc_dev), i));
   8488 			break;
   8489 		}
   8490 
   8491 		txs = &txq->txq_soft[i];
   8492 
   8493 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8494 			device_xname(sc->sc_dev), i));
   8495 
   8496 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8497 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8498 
   8499 		status =
   8500 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8501 		if ((status & WTX_ST_DD) == 0) {
   8502 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8503 			    BUS_DMASYNC_PREREAD);
   8504 			break;
   8505 		}
   8506 
   8507 		count++;
   8508 		DPRINTF(WM_DEBUG_TX,
   8509 		    ("%s: TX: job %d done: descs %d..%d\n",
   8510 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8511 		    txs->txs_lastdesc));
   8512 
   8513 		/*
   8514 		 * XXX We should probably be using the statistics
   8515 		 * XXX registers, but I don't know if they exist
   8516 		 * XXX on chips before the i82544.
   8517 		 */
   8518 
   8519 #ifdef WM_EVENT_COUNTERS
   8520 		if (status & WTX_ST_TU)
   8521 			WM_Q_EVCNT_INCR(txq, underrun);
   8522 #endif /* WM_EVENT_COUNTERS */
   8523 
   8524 		/*
   8525 		 * The documents for 82574 and newer say the status field has
   8526 		 * neither an EC (Excessive Collision) bit nor an LC (Late
   8527 		 * Collision) bit (both are reserved). See the "PCIe GbE
   8528 		 * Controller Open Source Software Developer's Manual" and the
   8529 		 * 82574 (and newer) datasheets.
   8530 		 *
   8531 		 * XXX The LC bit has been seen set on I218 even on full-duplex
   8532 		 * media, so the bit may have some other, undocumented meaning.
   8533 		 */
   8534 
   8535 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8536 		    && ((sc->sc_type < WM_T_82574)
   8537 			|| (sc->sc_type == WM_T_80003))) {
   8538 			ifp->if_oerrors++;
   8539 			if (status & WTX_ST_LC)
   8540 				log(LOG_WARNING, "%s: late collision\n",
   8541 				    device_xname(sc->sc_dev));
   8542 			else if (status & WTX_ST_EC) {
   8543 				ifp->if_collisions +=
   8544 				    TX_COLLISION_THRESHOLD + 1;
   8545 				log(LOG_WARNING, "%s: excessive collisions\n",
   8546 				    device_xname(sc->sc_dev));
   8547 			}
   8548 		} else
   8549 			ifp->if_opackets++;
   8550 
   8551 		txq->txq_packets++;
   8552 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8553 
   8554 		txq->txq_free += txs->txs_ndesc;
   8555 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8556 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8557 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8558 		m_freem(txs->txs_mbuf);
   8559 		txs->txs_mbuf = NULL;
   8560 	}
   8561 
   8562 	/* Update the dirty transmit buffer pointer. */
   8563 	txq->txq_sdirty = i;
   8564 	DPRINTF(WM_DEBUG_TX,
   8565 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8566 
   8567 	/*
   8568 	 * If there are no more pending transmissions, cancel the watchdog
   8569 	 * timer.
   8570 	 */
   8571 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8572 		txq->txq_sending = false;
   8573 
   8574 	return more;
   8575 }
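
/*
 * Editor's note: a sketch of the completion handshake wm_txeof()
 * implements.  WTX_CMD_RS is set on each job's last descriptor, and
 * the chip writes WTX_ST_DD back into that descriptor's status byte
 * once the job has been processed, so reclaiming is an in-order scan
 * that stops at the first job whose DD bit is still clear.
 * status_of() and reclaim() are hypothetical helpers.
 */
#if 0
	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
		if ((status_of(txq, i) & WTX_ST_DD) == 0)
			break;		/* the chip still owns this job */
		reclaim(txq, i);	/* unload the DMA map, free mbuf */
	}
#endif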
   8576 
   8577 static inline uint32_t
   8578 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8579 {
   8580 	struct wm_softc *sc = rxq->rxq_sc;
   8581 
   8582 	if (sc->sc_type == WM_T_82574)
   8583 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8584 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8585 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8586 	else
   8587 		return rxq->rxq_descs[idx].wrx_status;
   8588 }
   8589 
   8590 static inline uint32_t
   8591 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8592 {
   8593 	struct wm_softc *sc = rxq->rxq_sc;
   8594 
   8595 	if (sc->sc_type == WM_T_82574)
   8596 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8597 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8598 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8599 	else
   8600 		return rxq->rxq_descs[idx].wrx_errors;
   8601 }
   8602 
   8603 static inline uint16_t
   8604 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8605 {
   8606 	struct wm_softc *sc = rxq->rxq_sc;
   8607 
   8608 	if (sc->sc_type == WM_T_82574)
   8609 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8610 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8611 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8612 	else
   8613 		return rxq->rxq_descs[idx].wrx_special;
   8614 }
   8615 
   8616 static inline int
   8617 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8618 {
   8619 	struct wm_softc *sc = rxq->rxq_sc;
   8620 
   8621 	if (sc->sc_type == WM_T_82574)
   8622 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8623 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8624 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8625 	else
   8626 		return rxq->rxq_descs[idx].wrx_len;
   8627 }
   8628 
   8629 #ifdef WM_DEBUG
   8630 static inline uint32_t
   8631 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8632 {
   8633 	struct wm_softc *sc = rxq->rxq_sc;
   8634 
   8635 	if (sc->sc_type == WM_T_82574)
   8636 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8637 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8638 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8639 	else
   8640 		return 0;
   8641 }
   8642 
   8643 static inline uint8_t
   8644 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8645 {
   8646 	struct wm_softc *sc = rxq->rxq_sc;
   8647 
   8648 	if (sc->sc_type == WM_T_82574)
   8649 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8650 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8651 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8652 	else
   8653 		return 0;
   8654 }
   8655 #endif /* WM_DEBUG */
   8656 
   8657 static inline bool
   8658 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8659     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8660 {
   8661 
   8662 	if (sc->sc_type == WM_T_82574)
   8663 		return (status & ext_bit) != 0;
   8664 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8665 		return (status & nq_bit) != 0;
   8666 	else
   8667 		return (status & legacy_bit) != 0;
   8668 }
   8669 
   8670 static inline bool
   8671 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8672     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8673 {
   8674 
   8675 	if (sc->sc_type == WM_T_82574)
   8676 		return (error & ext_bit) != 0;
   8677 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8678 		return (error & nq_bit) != 0;
   8679 	else
   8680 		return (error & legacy_bit) != 0;
   8681 }
   8682 
   8683 static inline bool
   8684 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8685 {
   8686 
   8687 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8688 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8689 		return true;
   8690 	else
   8691 		return false;
   8692 }
   8693 
   8694 static inline bool
   8695 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8696 {
   8697 	struct wm_softc *sc = rxq->rxq_sc;
   8698 
   8699 	/* XXX missing error bit for newqueue? */
   8700 	if (wm_rxdesc_is_set_error(sc, errors,
   8701 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8702 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8703 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8704 		NQRXC_ERROR_RXE)) {
   8705 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8706 		    EXTRXC_ERROR_SE, 0))
   8707 			log(LOG_WARNING, "%s: symbol error\n",
   8708 			    device_xname(sc->sc_dev));
   8709 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8710 		    EXTRXC_ERROR_SEQ, 0))
   8711 			log(LOG_WARNING, "%s: receive sequence error\n",
   8712 			    device_xname(sc->sc_dev));
   8713 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8714 		    EXTRXC_ERROR_CE, 0))
   8715 			log(LOG_WARNING, "%s: CRC error\n",
   8716 			    device_xname(sc->sc_dev));
   8717 		return true;
   8718 	}
   8719 
   8720 	return false;
   8721 }
   8722 
   8723 static inline bool
   8724 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8725 {
   8726 	struct wm_softc *sc = rxq->rxq_sc;
   8727 
   8728 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8729 		NQRXC_STATUS_DD)) {
   8730 		/* We have processed all of the receive descriptors. */
   8731 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8732 		return false;
   8733 	}
   8734 
   8735 	return true;
   8736 }
   8737 
   8738 static inline bool
   8739 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8740     uint16_t vlantag, struct mbuf *m)
   8741 {
   8742 
   8743 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8744 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8745 		vlan_set_tag(m, le16toh(vlantag));
   8746 	}
   8747 
   8748 	return true;
   8749 }
   8750 
   8751 static inline void
   8752 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8753     uint32_t errors, struct mbuf *m)
   8754 {
   8755 	struct wm_softc *sc = rxq->rxq_sc;
   8756 
   8757 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8758 		if (wm_rxdesc_is_set_status(sc, status,
   8759 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8760 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8761 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8762 			if (wm_rxdesc_is_set_error(sc, errors,
   8763 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8764 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8765 		}
   8766 		if (wm_rxdesc_is_set_status(sc, status,
   8767 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8768 			/*
   8769 			 * Note: we don't know if this was TCP or UDP,
   8770 			 * so we just set both bits, and expect the
   8771 			 * upper layers to deal.
   8772 			 */
   8773 			WM_Q_EVCNT_INCR(rxq, tusum);
   8774 			m->m_pkthdr.csum_flags |=
   8775 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8776 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8777 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8778 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8779 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8780 		}
   8781 	}
   8782 }
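
/*
 * Editor's note: because the hardware only reports "L4 checksum
 * checked", wm_rxdesc_ensure_checksum() sets both the TCP and UDP
 * flag pairs and lets the stack sort it out.  A hedged sketch of how
 * a protocol input path might consume the flags (an illustration,
 * not NetBSD's actual tcp_input() code):
 */
#if 0
	if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
		if (m->m_pkthdr.csum_flags & M_CSUM_TCP_UDP_BAD)
			goto drop;	/* hardware saw a bad checksum */
		/* otherwise verified in hardware; skip in_cksum() */
	}
#endif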
   8783 
   8784 /*
   8785  * wm_rxeof:
   8786  *
   8787  *	Helper; handle receive interrupts.
   8788  */
   8789 static bool
   8790 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8791 {
   8792 	struct wm_softc *sc = rxq->rxq_sc;
   8793 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8794 	struct wm_rxsoft *rxs;
   8795 	struct mbuf *m;
   8796 	int i, len;
   8797 	int count = 0;
   8798 	uint32_t status, errors;
   8799 	uint16_t vlantag;
   8800 	bool more = false;
   8801 
   8802 	KASSERT(mutex_owned(rxq->rxq_lock));
   8803 
   8804 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8805 		if (limit-- == 0) {
   8806 			rxq->rxq_ptr = i;
   8807 			more = true;
   8808 			DPRINTF(WM_DEBUG_RX,
   8809 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8810 				device_xname(sc->sc_dev), i));
   8811 			break;
   8812 		}
   8813 
   8814 		rxs = &rxq->rxq_soft[i];
   8815 
   8816 		DPRINTF(WM_DEBUG_RX,
   8817 		    ("%s: RX: checking descriptor %d\n",
   8818 			device_xname(sc->sc_dev), i));
   8819 		wm_cdrxsync(rxq, i,
   8820 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8821 
   8822 		status = wm_rxdesc_get_status(rxq, i);
   8823 		errors = wm_rxdesc_get_errors(rxq, i);
   8824 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8825 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8826 #ifdef WM_DEBUG
   8827 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8828 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8829 #endif
   8830 
   8831 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8832 			/*
   8833 			 * Update the receive pointer holding rxq_lock
   8834 			 * consistent with increment counter.
   8835 			 */
   8836 			rxq->rxq_ptr = i;
   8837 			break;
   8838 		}
   8839 
   8840 		count++;
   8841 		if (__predict_false(rxq->rxq_discard)) {
   8842 			DPRINTF(WM_DEBUG_RX,
   8843 			    ("%s: RX: discarding contents of descriptor %d\n",
   8844 				device_xname(sc->sc_dev), i));
   8845 			wm_init_rxdesc(rxq, i);
   8846 			if (wm_rxdesc_is_eop(rxq, status)) {
   8847 				/* Reset our state. */
   8848 				DPRINTF(WM_DEBUG_RX,
   8849 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8850 					device_xname(sc->sc_dev)));
   8851 				rxq->rxq_discard = 0;
   8852 			}
   8853 			continue;
   8854 		}
   8855 
   8856 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8857 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8858 
   8859 		m = rxs->rxs_mbuf;
   8860 
   8861 		/*
   8862 		 * Add a new receive buffer to the ring, unless of
   8863 		 * course the length is zero. Treat the latter as a
   8864 		 * failed mapping.
   8865 		 */
   8866 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8867 			/*
   8868 			 * Failed, throw away what we've done so
   8869 			 * far, and discard the rest of the packet.
   8870 			 */
   8871 			ifp->if_ierrors++;
   8872 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8873 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8874 			wm_init_rxdesc(rxq, i);
   8875 			if (!wm_rxdesc_is_eop(rxq, status))
   8876 				rxq->rxq_discard = 1;
   8877 			if (rxq->rxq_head != NULL)
   8878 				m_freem(rxq->rxq_head);
   8879 			WM_RXCHAIN_RESET(rxq);
   8880 			DPRINTF(WM_DEBUG_RX,
   8881 			    ("%s: RX: Rx buffer allocation failed, "
   8882 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8883 				rxq->rxq_discard ? " (discard)" : ""));
   8884 			continue;
   8885 		}
   8886 
   8887 		m->m_len = len;
   8888 		rxq->rxq_len += len;
   8889 		DPRINTF(WM_DEBUG_RX,
   8890 		    ("%s: RX: buffer at %p len %d\n",
   8891 			device_xname(sc->sc_dev), m->m_data, len));
   8892 
   8893 		/* If this is not the end of the packet, keep looking. */
   8894 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8895 			WM_RXCHAIN_LINK(rxq, m);
   8896 			DPRINTF(WM_DEBUG_RX,
   8897 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8898 				device_xname(sc->sc_dev), rxq->rxq_len));
   8899 			continue;
   8900 		}
   8901 
   8902 		/*
   8903 		 * Okay, we have the entire packet now. The chip is
   8904 		 * configured to include the FCS except on I350, I354 and
   8905 		 * I21[01] (not all chips can be configured to strip it),
   8906 		 * so we need to trim it. We may also need to adjust the
   8907 		 * length of the previous mbuf in the chain if the current
   8908 		 * mbuf is too short.
   8909 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
   8910 		 * register is always set on I350, so we don't trim there.
   8911 		 */
   8912 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8913 		    && (sc->sc_type != WM_T_I210)
   8914 		    && (sc->sc_type != WM_T_I211)) {
   8915 			if (m->m_len < ETHER_CRC_LEN) {
   8916 				rxq->rxq_tail->m_len
   8917 				    -= (ETHER_CRC_LEN - m->m_len);
   8918 				m->m_len = 0;
   8919 			} else
   8920 				m->m_len -= ETHER_CRC_LEN;
   8921 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8922 		} else
   8923 			len = rxq->rxq_len;
   8924 
   8925 		WM_RXCHAIN_LINK(rxq, m);
   8926 
   8927 		*rxq->rxq_tailp = NULL;
   8928 		m = rxq->rxq_head;
   8929 
   8930 		WM_RXCHAIN_RESET(rxq);
   8931 
   8932 		DPRINTF(WM_DEBUG_RX,
   8933 		    ("%s: RX: have entire packet, len -> %d\n",
   8934 			device_xname(sc->sc_dev), len));
   8935 
   8936 		/* If an error occurred, update stats and drop the packet. */
   8937 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8938 			m_freem(m);
   8939 			continue;
   8940 		}
   8941 
   8942 		/* No errors.  Receive the packet. */
   8943 		m_set_rcvif(m, ifp);
   8944 		m->m_pkthdr.len = len;
   8945 		/*
   8946 		 * TODO:
   8947 		 * We should save the rsshash and rsstype in this mbuf.
   8948 		 */
   8949 		DPRINTF(WM_DEBUG_RX,
   8950 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8951 			device_xname(sc->sc_dev), rsstype, rsshash));
   8952 
   8953 		/*
   8954 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8955 		 * for us.  Associate the tag with the packet.
   8956 		 */
   8957 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8958 			continue;
   8959 
   8960 		/* Set up checksum info for this packet. */
   8961 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8962 		/*
   8963 		 * Update the receive pointer holding rxq_lock consistent with
   8964 		 * increment counter.
   8965 		 */
   8966 		rxq->rxq_ptr = i;
   8967 		rxq->rxq_packets++;
   8968 		rxq->rxq_bytes += len;
   8969 		mutex_exit(rxq->rxq_lock);
   8970 
   8971 		/* Pass it on. */
   8972 		if_percpuq_enqueue(sc->sc_ipq, m);
   8973 
   8974 		mutex_enter(rxq->rxq_lock);
   8975 
   8976 		if (rxq->rxq_stopping)
   8977 			break;
   8978 	}
   8979 
   8980 	DPRINTF(WM_DEBUG_RX,
   8981 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8982 
   8983 	return more;
   8984 }
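
/*
 * Editor's note: a worked example of the FCS trim in wm_rxeof().
 * Suppose a 1502-byte frame (1498 bytes of headers and payload plus a
 * 4-byte FCS) arrives split into 1500-byte and 2-byte buffers.  The
 * final 2-byte mbuf is shorter than ETHER_CRC_LEN, so it is emptied
 * and the remaining 2 bytes of FCS are trimmed from the previous
 * mbuf, leaving exactly the 1498 bytes of real data:
 *
 *	if (m->m_len < ETHER_CRC_LEN) {
 *		rxq->rxq_tail->m_len -= ETHER_CRC_LEN - m->m_len;
 *		m->m_len = 0;
 *	} else
 *		m->m_len -= ETHER_CRC_LEN;
 */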
   8985 
   8986 /*
   8987  * wm_linkintr_gmii:
   8988  *
   8989  *	Helper; handle link interrupts for GMII.
   8990  */
   8991 static void
   8992 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8993 {
   8994 	device_t dev = sc->sc_dev;
   8995 	uint32_t status, reg;
   8996 	bool link;
   8997 	int rv;
   8998 
   8999 	KASSERT(WM_CORE_LOCKED(sc));
   9000 
   9001 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9002 		__func__));
   9003 
   9004 	if ((icr & ICR_LSC) == 0) {
   9005 		if (icr & ICR_RXSEQ)
   9006 			DPRINTF(WM_DEBUG_LINK,
   9007 			    ("%s: LINK Receive sequence error\n",
   9008 				device_xname(dev)));
   9009 		return;
   9010 	}
   9011 
   9012 	/* Link status changed */
   9013 	status = CSR_READ(sc, WMREG_STATUS);
   9014 	link = status & STATUS_LU;
   9015 	if (link) {
   9016 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9017 			device_xname(dev),
   9018 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9019 	} else {
   9020 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9021 			device_xname(dev)));
   9022 	}
   9023 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9024 		wm_gig_downshift_workaround_ich8lan(sc);
   9025 
   9026 	if ((sc->sc_type == WM_T_ICH8)
   9027 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9028 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9029 	}
   9030 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9031 		device_xname(dev)));
   9032 	mii_pollstat(&sc->sc_mii);
   9033 	if (sc->sc_type == WM_T_82543) {
   9034 		int miistatus, active;
   9035 
   9036 		/*
   9037 		 * With the 82543, we need to force the MAC's speed
   9038 		 * and duplex to match the PHY's speed and duplex
   9039 		 * configuration.
   9040 		 */
   9041 		miistatus = sc->sc_mii.mii_media_status;
   9042 
   9043 		if (miistatus & IFM_ACTIVE) {
   9044 			active = sc->sc_mii.mii_media_active;
   9045 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9046 			switch (IFM_SUBTYPE(active)) {
   9047 			case IFM_10_T:
   9048 				sc->sc_ctrl |= CTRL_SPEED_10;
   9049 				break;
   9050 			case IFM_100_TX:
   9051 				sc->sc_ctrl |= CTRL_SPEED_100;
   9052 				break;
   9053 			case IFM_1000_T:
   9054 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9055 				break;
   9056 			default:
   9057 				/*
   9058 				 * Fiber?
   9059 				 * Should not enter here.
   9060 				 */
   9061 				device_printf(dev, "unknown media (%x)\n",
   9062 				    active);
   9063 				break;
   9064 			}
   9065 			if (active & IFM_FDX)
   9066 				sc->sc_ctrl |= CTRL_FD;
   9067 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9068 		}
   9069 	} else if (sc->sc_type == WM_T_PCH) {
   9070 		wm_k1_gig_workaround_hv(sc,
   9071 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9072 	}
   9073 
   9074 	/*
   9075 	 * When connected at 10Mbps half-duplex, some parts are excessively
   9076 	 * aggressive resulting in many collisions. To avoid this, increase
   9077 	 * the IPG and reduce Rx latency in the PHY.
   9078 	 */
   9079 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9080 	    && link) {
   9081 		uint32_t tipg_reg;
   9082 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9083 		bool fdx;
   9084 		uint16_t emi_addr, emi_val;
   9085 
   9086 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9087 		tipg_reg &= ~TIPG_IPGT_MASK;
   9088 		fdx = status & STATUS_FD;
   9089 
   9090 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9091 			tipg_reg |= 0xff;
   9092 			/* Reduce Rx latency in analog PHY */
   9093 			emi_val = 0;
   9094 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9095 		    fdx && speed != STATUS_SPEED_1000) {
   9096 			tipg_reg |= 0xc;
   9097 			emi_val = 1;
   9098 		} else {
   9099 			/* Roll back the default values */
   9100 			tipg_reg |= 0x08;
   9101 			emi_val = 1;
   9102 		}
   9103 
   9104 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9105 
   9106 		rv = sc->phy.acquire(sc);
   9107 		if (rv)
   9108 			return;
   9109 
   9110 		if (sc->sc_type == WM_T_PCH2)
   9111 			emi_addr = I82579_RX_CONFIG;
   9112 		else
   9113 			emi_addr = I217_RX_CONFIG;
   9114 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9115 
   9116 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9117 			uint16_t phy_reg;
   9118 
   9119 			sc->phy.readreg_locked(dev, 2,
   9120 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9121 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9122 			if (speed == STATUS_SPEED_100
   9123 			    || speed == STATUS_SPEED_10)
   9124 				phy_reg |= 0x3e8;
   9125 			else
   9126 				phy_reg |= 0xfa;
   9127 			sc->phy.writereg_locked(dev, 2,
   9128 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9129 
   9130 			if (speed == STATUS_SPEED_1000) {
   9131 				sc->phy.readreg_locked(dev, 2,
   9132 				    HV_PM_CTRL, &phy_reg);
   9133 
   9134 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9135 
   9136 				sc->phy.writereg_locked(dev, 2,
   9137 				    HV_PM_CTRL, phy_reg);
   9138 			}
   9139 		}
   9140 		sc->phy.release(sc);
   9141 
   9142 		if (rv)
   9143 			return;
   9144 
   9145 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9146 			uint16_t data, ptr_gap;
   9147 
   9148 			if (speed == STATUS_SPEED_1000) {
   9149 				rv = sc->phy.acquire(sc);
   9150 				if (rv)
   9151 					return;
   9152 
   9153 				rv = sc->phy.readreg_locked(dev, 2,
   9154 				    I219_UNKNOWN1, &data);
   9155 				if (rv) {
   9156 					sc->phy.release(sc);
   9157 					return;
   9158 				}
   9159 
   9160 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9161 				if (ptr_gap < 0x18) {
   9162 					data &= ~(0x3ff << 2);
   9163 					data |= (0x18 << 2);
   9164 					rv = sc->phy.writereg_locked(dev,
   9165 					    2, I219_UNKNOWN1, data);
   9166 				}
   9167 				sc->phy.release(sc);
   9168 				if (rv)
   9169 					return;
   9170 			} else {
   9171 				rv = sc->phy.acquire(sc);
   9172 				if (rv)
   9173 					return;
   9174 
   9175 				rv = sc->phy.writereg_locked(dev, 2,
   9176 				    I219_UNKNOWN1, 0xc023);
   9177 				sc->phy.release(sc);
   9178 				if (rv)
   9179 					return;
   9180 
   9181 			}
   9182 		}
   9183 	}
   9184 
   9185 	/*
   9186 	 * I217 packet loss issue:
   9187 	 * Ensure that the FEXTNVM4 Beacon Duration is set correctly
   9188 	 * on power up.
   9189 	 * Set the Beacon Duration for I217 to 8 usec.
   9190 	 */
   9191 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9192 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9193 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9194 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9195 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9196 	}
   9197 
   9198 	/* Work-around I218 hang issue */
   9199 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9200 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9201 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9202 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9203 		wm_k1_workaround_lpt_lp(sc, link);
   9204 
   9205 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9206 		/*
   9207 		 * Set platform power management values for Latency
   9208 		 * Tolerance Reporting (LTR)
   9209 		 */
   9210 		wm_platform_pm_pch_lpt(sc,
   9211 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9212 	}
   9213 
   9214 	/* Clear link partner's EEE ability */
   9215 	sc->eee_lp_ability = 0;
   9216 
   9217 	/* FEXTNVM6 K1-off workaround */
   9218 	if (sc->sc_type == WM_T_PCH_SPT) {
   9219 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9220 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9221 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9222 		else
   9223 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9224 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9225 	}
   9226 
   9227 	if (!link)
   9228 		return;
   9229 
   9230 	switch (sc->sc_type) {
   9231 	case WM_T_PCH2:
   9232 		wm_k1_workaround_lv(sc);
   9233 		/* FALLTHROUGH */
   9234 	case WM_T_PCH:
   9235 		if (sc->sc_phytype == WMPHY_82578)
   9236 			wm_link_stall_workaround_hv(sc);
   9237 		break;
   9238 	default:
   9239 		break;
   9240 	}
   9241 
   9242 	/* Enable/Disable EEE after link up */
   9243 	if (sc->sc_phytype > WMPHY_82579)
   9244 		wm_set_eee_pchlan(sc);
   9245 }
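
/*
 * Editor's note: the TIPG adjustment above, summarized.  At 10 Mb/s
 * half duplex the transmit inter-packet gap is stretched to its
 * maximum (IPGT = 0xff) to back off an overly aggressive transmitter;
 * full duplex below 1 Gb/s on PCH_SPT and newer uses IPGT = 0xc; and
 * every other case rolls back to the default IPGT = 0x08, with the
 * PHY's Rx latency (the EMI register) adjusted to match.
 */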
   9246 
   9247 /*
   9248  * wm_linkintr_tbi:
   9249  *
   9250  *	Helper; handle link interrupts for TBI mode.
   9251  */
   9252 static void
   9253 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9254 {
   9255 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9256 	uint32_t status;
   9257 
   9258 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9259 		__func__));
   9260 
   9261 	status = CSR_READ(sc, WMREG_STATUS);
   9262 	if (icr & ICR_LSC) {
   9263 		wm_check_for_link(sc);
   9264 		if (status & STATUS_LU) {
   9265 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9266 				device_xname(sc->sc_dev),
   9267 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9268 			/*
   9269 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9270 			 * so we should update sc->sc_ctrl
   9271 			 */
   9272 
   9273 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9274 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9275 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9276 			if (status & STATUS_FD)
   9277 				sc->sc_tctl |=
   9278 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9279 			else
   9280 				sc->sc_tctl |=
   9281 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9282 			if (sc->sc_ctrl & CTRL_TFCE)
   9283 				sc->sc_fcrtl |= FCRTL_XONE;
   9284 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9285 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9286 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9287 			sc->sc_tbi_linkup = 1;
   9288 			if_link_state_change(ifp, LINK_STATE_UP);
   9289 		} else {
   9290 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9291 				device_xname(sc->sc_dev)));
   9292 			sc->sc_tbi_linkup = 0;
   9293 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9294 		}
   9295 		/* Update LED */
   9296 		wm_tbi_serdes_set_linkled(sc);
   9297 	} else if (icr & ICR_RXSEQ)
   9298 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9299 			device_xname(sc->sc_dev)));
   9300 }
   9301 
   9302 /*
   9303  * wm_linkintr_serdes:
   9304  *
   9305  *	Helper; handle link interrupts for SERDES mode.
   9306  */
   9307 static void
   9308 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9309 {
   9310 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9311 	struct mii_data *mii = &sc->sc_mii;
   9312 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9313 	uint32_t pcs_adv, pcs_lpab, reg;
   9314 
   9315 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9316 		__func__));
   9317 
   9318 	if (icr & ICR_LSC) {
   9319 		/* Check PCS */
   9320 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9321 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9322 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9323 				device_xname(sc->sc_dev)));
   9324 			mii->mii_media_status |= IFM_ACTIVE;
   9325 			sc->sc_tbi_linkup = 1;
   9326 			if_link_state_change(ifp, LINK_STATE_UP);
   9327 		} else {
   9328 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9329 				device_xname(sc->sc_dev)));
   9330 			mii->mii_media_status |= IFM_NONE;
   9331 			sc->sc_tbi_linkup = 0;
   9332 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9333 			wm_tbi_serdes_set_linkled(sc);
   9334 			return;
   9335 		}
   9336 		mii->mii_media_active |= IFM_1000_SX;
   9337 		if ((reg & PCS_LSTS_FDX) != 0)
   9338 			mii->mii_media_active |= IFM_FDX;
   9339 		else
   9340 			mii->mii_media_active |= IFM_HDX;
   9341 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9342 			/* Check flow */
   9343 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9344 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9345 				DPRINTF(WM_DEBUG_LINK,
   9346 				    ("XXX LINKOK but not ACOMP\n"));
   9347 				return;
   9348 			}
   9349 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9350 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9351 			DPRINTF(WM_DEBUG_LINK,
   9352 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9353 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9354 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9355 				mii->mii_media_active |= IFM_FLOW
   9356 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9357 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9358 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9359 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9360 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9361 				mii->mii_media_active |= IFM_FLOW
   9362 				    | IFM_ETH_TXPAUSE;
   9363 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9364 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9365 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9366 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9367 				mii->mii_media_active |= IFM_FLOW
   9368 				    | IFM_ETH_RXPAUSE;
   9369 		}
   9370 		/* Update LED */
   9371 		wm_tbi_serdes_set_linkled(sc);
   9372 	} else
   9373 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9374 		    device_xname(sc->sc_dev)));
   9375 }
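
/*
 * Editor's note: the flow-control resolution above follows the usual
 * 802.3 pause negotiation on the advertised (pcs_adv) and link
 * partner (pcs_lpab) SYM/ASYM pause bits:
 *
 *	adv SYM	adv ASYM	lp SYM	lp ASYM		result
 *	   1	    -		   1	    -		TX and RX pause
 *	   0	    1		   1	    1		TX pause only
 *	   1	    1		   0	    1		RX pause only
 *	(any other combination)				no pause
 */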
   9376 
   9377 /*
   9378  * wm_linkintr:
   9379  *
   9380  *	Helper; handle link interrupts.
   9381  */
   9382 static void
   9383 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9384 {
   9385 
   9386 	KASSERT(WM_CORE_LOCKED(sc));
   9387 
   9388 	if (sc->sc_flags & WM_F_HAS_MII)
   9389 		wm_linkintr_gmii(sc, icr);
   9390 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9391 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9392 		wm_linkintr_serdes(sc, icr);
   9393 	else
   9394 		wm_linkintr_tbi(sc, icr);
   9395 }
   9396 
   9397 /*
   9398  * wm_intr_legacy:
   9399  *
   9400  *	Interrupt service routine for INTx and MSI.
   9401  */
   9402 static int
   9403 wm_intr_legacy(void *arg)
   9404 {
   9405 	struct wm_softc *sc = arg;
   9406 	struct wm_queue *wmq = &sc->sc_queue[0];
   9407 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9408 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9409 	uint32_t icr, rndval = 0;
   9410 	int handled = 0;
   9411 
   9412 	while (1 /* CONSTCOND */) {
   9413 		icr = CSR_READ(sc, WMREG_ICR);
   9414 		if ((icr & sc->sc_icr) == 0)
   9415 			break;
   9416 		if (handled == 0)
   9417 			DPRINTF(WM_DEBUG_TX,
   9418 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9419 		if (rndval == 0)
   9420 			rndval = icr;
   9421 
   9422 		mutex_enter(rxq->rxq_lock);
   9423 
   9424 		if (rxq->rxq_stopping) {
   9425 			mutex_exit(rxq->rxq_lock);
   9426 			break;
   9427 		}
   9428 
   9429 		handled = 1;
   9430 
   9431 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9432 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9433 			DPRINTF(WM_DEBUG_RX,
   9434 			    ("%s: RX: got Rx intr 0x%08x\n",
   9435 				device_xname(sc->sc_dev),
   9436 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9437 			WM_Q_EVCNT_INCR(rxq, intr);
   9438 		}
   9439 #endif
   9440 		/*
   9441 		 * wm_rxeof() does *not* call upper layer functions directly,
   9442 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9443 		 * So, we can call wm_rxeof() in interrupt context.
   9444 		 */
   9445 		wm_rxeof(rxq, UINT_MAX);
   9446 		/* Fill lower bits with RX index. See below for the upper. */
   9447 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9448 
   9449 		mutex_exit(rxq->rxq_lock);
   9450 		mutex_enter(txq->txq_lock);
   9451 
   9452 		if (txq->txq_stopping) {
   9453 			mutex_exit(txq->txq_lock);
   9454 			break;
   9455 		}
   9456 
   9457 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9458 		if (icr & ICR_TXDW) {
   9459 			DPRINTF(WM_DEBUG_TX,
   9460 			    ("%s: TX: got TXDW interrupt\n",
   9461 				device_xname(sc->sc_dev)));
   9462 			WM_Q_EVCNT_INCR(txq, txdw);
   9463 		}
   9464 #endif
   9465 		wm_txeof(txq, UINT_MAX);
   9466 		/* Fill upper bits with TX index. See above for the lower. */
   9467 		rndval |= txq->txq_next * WM_NRXDESC;
   9468 
   9469 		mutex_exit(txq->txq_lock);
   9470 		WM_CORE_LOCK(sc);
   9471 
   9472 		if (sc->sc_core_stopping) {
   9473 			WM_CORE_UNLOCK(sc);
   9474 			break;
   9475 		}
   9476 
   9477 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9478 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9479 			wm_linkintr(sc, icr);
   9480 		}
   9481 		if ((icr & ICR_GPI(0)) != 0)
   9482 			device_printf(sc->sc_dev, "got module interrupt\n");
   9483 
   9484 		WM_CORE_UNLOCK(sc);
   9485 
   9486 		if (icr & ICR_RXO) {
   9487 #if defined(WM_DEBUG)
   9488 			log(LOG_WARNING, "%s: Receive overrun\n",
   9489 			    device_xname(sc->sc_dev));
   9490 #endif /* defined(WM_DEBUG) */
   9491 		}
   9492 	}
   9493 
   9494 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
   9495 
   9496 	if (handled) {
   9497 		/* Try to get more packets going. */
   9498 		softint_schedule(wmq->wmq_si);
   9499 	}
   9500 
   9501 	return handled;
   9502 }
   9503 
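/*
 * wm_txrxintr_disable:
 *
 *	Mask this queue's Tx/Rx interrupts: via IMC on the 82574, via the
 *	per-queue EITR bits in EIMC on the 82575, and via one EIMC bit per
 *	MSI-X vector on the other chips.
 */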
   9504 static inline void
   9505 wm_txrxintr_disable(struct wm_queue *wmq)
   9506 {
   9507 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9508 
   9509 	if (sc->sc_type == WM_T_82574)
   9510 		CSR_WRITE(sc, WMREG_IMC,
   9511 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9512 	else if (sc->sc_type == WM_T_82575)
   9513 		CSR_WRITE(sc, WMREG_EIMC,
   9514 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9515 	else
   9516 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9517 }
   9518 
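/*
 * wm_txrxintr_enable:
 *
 *	Update the interrupt throttling rate and unmask this queue's Tx/Rx
 *	interrupts (and, on the 82574, ICR_OTHER; see the comment below).
 */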
   9519 static inline void
   9520 wm_txrxintr_enable(struct wm_queue *wmq)
   9521 {
   9522 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9523 
   9524 	wm_itrs_calculate(sc, wmq);
   9525 
	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
	 * here. There is no need to care which of RXQ(0) and RXQ(1)
	 * re-enables ICR_OTHER first, because each RXQ/TXQ interrupt is
	 * disabled while its wm_handle_queue(wmq) is running.
	 */
   9532 	if (sc->sc_type == WM_T_82574)
   9533 		CSR_WRITE(sc, WMREG_IMS,
   9534 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9535 	else if (sc->sc_type == WM_T_82575)
   9536 		CSR_WRITE(sc, WMREG_EIMS,
   9537 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9538 	else
   9539 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9540 }
   9541 
   9542 static int
   9543 wm_txrxintr_msix(void *arg)
   9544 {
   9545 	struct wm_queue *wmq = arg;
   9546 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9547 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9548 	struct wm_softc *sc = txq->txq_sc;
   9549 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9550 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9551 	uint32_t rndval = 0;
   9552 	bool txmore;
   9553 	bool rxmore;
   9554 
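	/* The MSI-X vectors are set up so that vector index == queue id. */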
   9555 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9556 
   9557 	DPRINTF(WM_DEBUG_TX,
   9558 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9559 
   9560 	wm_txrxintr_disable(wmq);
   9561 
   9562 	mutex_enter(txq->txq_lock);
   9563 
   9564 	if (txq->txq_stopping) {
   9565 		mutex_exit(txq->txq_lock);
   9566 		return 0;
   9567 	}
   9568 
   9569 	WM_Q_EVCNT_INCR(txq, txdw);
   9570 	txmore = wm_txeof(txq, txlimit);
   9571 	/* Fill upper bits with TX index. See below for the lower. */
   9572 	rndval = txq->txq_next * WM_NRXDESC;
	/* wm_deferred_start() is done in wm_handle_queue(). */
   9574 	mutex_exit(txq->txq_lock);
   9575 
   9576 	DPRINTF(WM_DEBUG_RX,
   9577 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9578 	mutex_enter(rxq->rxq_lock);
   9579 
   9580 	if (rxq->rxq_stopping) {
   9581 		mutex_exit(rxq->rxq_lock);
   9582 		return 0;
   9583 	}
   9584 
   9585 	WM_Q_EVCNT_INCR(rxq, intr);
   9586 	rxmore = wm_rxeof(rxq, rxlimit);
   9587 
   9588 	/* Fill lower bits with RX index. See above for the upper. */
   9589 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9590 	mutex_exit(rxq->rxq_lock);
   9591 
   9592 	wm_itrs_writereg(sc, wmq);
   9593 
	/*
	 * This function is called in hardware interrupt context, and each
	 * queue is serviced on a single CPU, so no lock is required.
	 */
   9598 	if (rndval != 0)
   9599 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
   9600 
   9601 	if (txmore || rxmore)
   9602 		softint_schedule(wmq->wmq_si);
   9603 	else
   9604 		wm_txrxintr_enable(wmq);
   9605 
   9606 	return 1;
   9607 }
   9608 
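/*
 * wm_handle_queue:
 *
 *	Softint handler for deferred Tx/Rx processing. The interrupt
 *	handler only processes a bounded number of descriptors; whatever
 *	is left over is processed here, and the queue interrupts are
 *	re-enabled once nothing remains.
 */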
   9609 static void
   9610 wm_handle_queue(void *arg)
   9611 {
   9612 	struct wm_queue *wmq = arg;
   9613 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9614 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9615 	struct wm_softc *sc = txq->txq_sc;
   9616 	u_int txlimit = sc->sc_tx_process_limit;
   9617 	u_int rxlimit = sc->sc_rx_process_limit;
   9618 	bool txmore;
   9619 	bool rxmore;
   9620 
   9621 	mutex_enter(txq->txq_lock);
   9622 	if (txq->txq_stopping) {
   9623 		mutex_exit(txq->txq_lock);
   9624 		return;
   9625 	}
   9626 	txmore = wm_txeof(txq, txlimit);
   9627 	wm_deferred_start_locked(txq);
   9628 	mutex_exit(txq->txq_lock);
   9629 
   9630 	mutex_enter(rxq->rxq_lock);
   9631 	if (rxq->rxq_stopping) {
   9632 		mutex_exit(rxq->rxq_lock);
   9633 		return;
   9634 	}
   9635 	WM_Q_EVCNT_INCR(rxq, defer);
   9636 	rxmore = wm_rxeof(rxq, rxlimit);
   9637 	mutex_exit(rxq->rxq_lock);
   9638 
   9639 	if (txmore || rxmore)
   9640 		softint_schedule(wmq->wmq_si);
   9641 	else
   9642 		wm_txrxintr_enable(wmq);
   9643 }
   9644 
   9645 /*
   9646  * wm_linkintr_msix:
   9647  *
   9648  *	Interrupt service routine for link status change for MSI-X.
   9649  */
   9650 static int
   9651 wm_linkintr_msix(void *arg)
   9652 {
   9653 	struct wm_softc *sc = arg;
   9654 	uint32_t reg;
	bool has_rxo = false;
   9656 
   9657 	reg = CSR_READ(sc, WMREG_ICR);
   9658 	WM_CORE_LOCK(sc);
   9659 	DPRINTF(WM_DEBUG_LINK,
   9660 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9661 		device_xname(sc->sc_dev), reg));
   9662 
   9663 	if (sc->sc_core_stopping)
   9664 		goto out;
   9665 
   9666 	if ((reg & ICR_LSC) != 0) {
   9667 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9668 		wm_linkintr(sc, ICR_LSC);
   9669 	}
   9670 	if ((reg & ICR_GPI(0)) != 0)
   9671 		device_printf(sc->sc_dev, "got module interrupt\n");
   9672 
	/*
	 * XXX 82574 MSI-X mode workaround
	 *
	 * In 82574 MSI-X mode, a receive overrun (RXO) raises the ICR_OTHER
	 * MSI-X vector but triggers neither the ICR_RXQ(0) nor the
	 * ICR_RXQ(1) vector. So we write WMREG_ICS to generate ICR_RXQ(0)
	 * and ICR_RXQ(1) interrupts and get the received packets processed.
	 */
   9681 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9682 #if defined(WM_DEBUG)
   9683 		log(LOG_WARNING, "%s: Receive overrun\n",
   9684 		    device_xname(sc->sc_dev));
   9685 #endif /* defined(WM_DEBUG) */
   9686 
   9687 		has_rxo = true;
		/*
		 * The RXO interrupt fires at a very high rate when receive
		 * traffic is heavy, so treat ICR_OTHER like the Tx/Rx
		 * interrupts and poll: ICR_OTHER is re-enabled at the end of
		 * wm_txrxintr_msix(), which is kicked by both the ICR_RXQ(0)
		 * and ICR_RXQ(1) interrupts.
		 */
   9695 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9696 
   9697 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9698 	}
   9701 
   9702 out:
   9703 	WM_CORE_UNLOCK(sc);
   9704 
   9705 	if (sc->sc_type == WM_T_82574) {
   9706 		if (!has_rxo)
   9707 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9708 		else
   9709 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9710 	} else if (sc->sc_type == WM_T_82575)
   9711 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9712 	else
   9713 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9714 
   9715 	return 1;
   9716 }
   9717 
   9718 /*
   9719  * Media related.
   9720  * GMII, SGMII, TBI (and SERDES)
   9721  */
   9722 
   9723 /* Common */
   9724 
   9725 /*
   9726  * wm_tbi_serdes_set_linkled:
   9727  *
   9728  *	Update the link LED on TBI and SERDES devices.
   9729  */
   9730 static void
   9731 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9732 {
   9733 
   9734 	if (sc->sc_tbi_linkup)
   9735 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9736 	else
   9737 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9738 
   9739 	/* 82540 or newer devices are active low */
   9740 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9741 
   9742 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9743 }
   9744 
   9745 /* GMII related */
   9746 
   9747 /*
   9748  * wm_gmii_reset:
   9749  *
   9750  *	Reset the PHY.
   9751  */
   9752 static void
   9753 wm_gmii_reset(struct wm_softc *sc)
   9754 {
   9755 	uint32_t reg;
   9756 	int rv;
   9757 
   9758 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9759 		device_xname(sc->sc_dev), __func__));
   9760 
   9761 	rv = sc->phy.acquire(sc);
   9762 	if (rv != 0) {
   9763 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9764 		    __func__);
   9765 		return;
   9766 	}
   9767 
   9768 	switch (sc->sc_type) {
   9769 	case WM_T_82542_2_0:
   9770 	case WM_T_82542_2_1:
   9771 		/* null */
   9772 		break;
   9773 	case WM_T_82543:
   9774 		/*
   9775 		 * With 82543, we need to force speed and duplex on the MAC
   9776 		 * equal to what the PHY speed and duplex configuration is.
   9777 		 * In addition, we need to perform a hardware reset on the PHY
   9778 		 * to take it out of reset.
   9779 		 */
   9780 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9781 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9782 
   9783 		/* The PHY reset pin is active-low. */
   9784 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9785 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9786 		    CTRL_EXT_SWDPIN(4));
   9787 		reg |= CTRL_EXT_SWDPIO(4);
   9788 
   9789 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9790 		CSR_WRITE_FLUSH(sc);
   9791 		delay(10*1000);
   9792 
   9793 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9794 		CSR_WRITE_FLUSH(sc);
   9795 		delay(150);
   9796 #if 0
   9797 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9798 #endif
   9799 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9800 		break;
   9801 	case WM_T_82544:	/* Reset 10000us */
   9802 	case WM_T_82540:
   9803 	case WM_T_82545:
   9804 	case WM_T_82545_3:
   9805 	case WM_T_82546:
   9806 	case WM_T_82546_3:
   9807 	case WM_T_82541:
   9808 	case WM_T_82541_2:
   9809 	case WM_T_82547:
   9810 	case WM_T_82547_2:
   9811 	case WM_T_82571:	/* Reset 100us */
   9812 	case WM_T_82572:
   9813 	case WM_T_82573:
   9814 	case WM_T_82574:
   9815 	case WM_T_82575:
   9816 	case WM_T_82576:
   9817 	case WM_T_82580:
   9818 	case WM_T_I350:
   9819 	case WM_T_I354:
   9820 	case WM_T_I210:
   9821 	case WM_T_I211:
   9822 	case WM_T_82583:
   9823 	case WM_T_80003:
   9824 		/* Generic reset */
   9825 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9826 		CSR_WRITE_FLUSH(sc);
   9827 		delay(20000);
   9828 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9829 		CSR_WRITE_FLUSH(sc);
   9830 		delay(20000);
   9831 
   9832 		if ((sc->sc_type == WM_T_82541)
   9833 		    || (sc->sc_type == WM_T_82541_2)
   9834 		    || (sc->sc_type == WM_T_82547)
   9835 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
   9837 			/* XXX add code to set LED after phy reset */
   9838 		}
   9839 		break;
   9840 	case WM_T_ICH8:
   9841 	case WM_T_ICH9:
   9842 	case WM_T_ICH10:
   9843 	case WM_T_PCH:
   9844 	case WM_T_PCH2:
   9845 	case WM_T_PCH_LPT:
   9846 	case WM_T_PCH_SPT:
   9847 	case WM_T_PCH_CNP:
   9848 		/* Generic reset */
   9849 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9850 		CSR_WRITE_FLUSH(sc);
   9851 		delay(100);
   9852 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9853 		CSR_WRITE_FLUSH(sc);
   9854 		delay(150);
   9855 		break;
   9856 	default:
   9857 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9858 		    __func__);
   9859 		break;
   9860 	}
   9861 
   9862 	sc->phy.release(sc);
   9863 
   9864 	/* get_cfg_done */
   9865 	wm_get_cfg_done(sc);
   9866 
   9867 	/* Extra setup */
   9868 	switch (sc->sc_type) {
   9869 	case WM_T_82542_2_0:
   9870 	case WM_T_82542_2_1:
   9871 	case WM_T_82543:
   9872 	case WM_T_82544:
   9873 	case WM_T_82540:
   9874 	case WM_T_82545:
   9875 	case WM_T_82545_3:
   9876 	case WM_T_82546:
   9877 	case WM_T_82546_3:
   9878 	case WM_T_82541_2:
   9879 	case WM_T_82547_2:
   9880 	case WM_T_82571:
   9881 	case WM_T_82572:
   9882 	case WM_T_82573:
   9883 	case WM_T_82574:
   9884 	case WM_T_82583:
   9885 	case WM_T_82575:
   9886 	case WM_T_82576:
   9887 	case WM_T_82580:
   9888 	case WM_T_I350:
   9889 	case WM_T_I354:
   9890 	case WM_T_I210:
   9891 	case WM_T_I211:
   9892 	case WM_T_80003:
   9893 		/* Null */
   9894 		break;
   9895 	case WM_T_82541:
   9896 	case WM_T_82547:
		/* XXX Configure activity LED after PHY reset */
   9898 		break;
   9899 	case WM_T_ICH8:
   9900 	case WM_T_ICH9:
   9901 	case WM_T_ICH10:
   9902 	case WM_T_PCH:
   9903 	case WM_T_PCH2:
   9904 	case WM_T_PCH_LPT:
   9905 	case WM_T_PCH_SPT:
   9906 	case WM_T_PCH_CNP:
   9907 		wm_phy_post_reset(sc);
   9908 		break;
   9909 	default:
   9910 		panic("%s: unknown type\n", __func__);
   9911 		break;
   9912 	}
   9913 }
   9914 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and selecting them requires the PCI ID or MAC type, since no
 * PHY register can be accessed yet.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
 * so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. This might still be imperfect because of missing comparison
 * entries, but it is better than the first call.
 *
 *  If the newly detected result differs from the previous assumption, a
 * diagnostic message is printed.
 */
   9933 static void
   9934 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9935     uint16_t phy_model)
   9936 {
   9937 	device_t dev = sc->sc_dev;
   9938 	struct mii_data *mii = &sc->sc_mii;
   9939 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9940 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9941 	mii_readreg_t new_readreg;
   9942 	mii_writereg_t new_writereg;
   9943 
   9944 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9945 		device_xname(sc->sc_dev), __func__));
   9946 
   9947 	if (mii->mii_readreg == NULL) {
   9948 		/*
   9949 		 *  This is the first call of this function. For ICH and PCH
   9950 		 * variants, it's difficult to determine the PHY access method
   9951 		 * by sc_type, so use the PCI product ID for some devices.
   9952 		 */
   9953 
   9954 		switch (sc->sc_pcidevid) {
   9955 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9956 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9957 			/* 82577 */
   9958 			new_phytype = WMPHY_82577;
   9959 			break;
   9960 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9961 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9962 			/* 82578 */
   9963 			new_phytype = WMPHY_82578;
   9964 			break;
   9965 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9966 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9967 			/* 82579 */
   9968 			new_phytype = WMPHY_82579;
   9969 			break;
   9970 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9971 		case PCI_PRODUCT_INTEL_82801I_BM:
   9972 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9973 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9974 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9975 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9976 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9977 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9978 			/* ICH8, 9, 10 with 82567 */
   9979 			new_phytype = WMPHY_BM;
   9980 			break;
   9981 		default:
   9982 			break;
   9983 		}
   9984 	} else {
   9985 		/* It's not the first call. Use PHY OUI and model */
   9986 		switch (phy_oui) {
   9987 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9988 			switch (phy_model) {
   9989 			case 0x0004: /* XXX */
   9990 				new_phytype = WMPHY_82578;
   9991 				break;
   9992 			default:
   9993 				break;
   9994 			}
   9995 			break;
   9996 		case MII_OUI_xxMARVELL:
   9997 			switch (phy_model) {
   9998 			case MII_MODEL_xxMARVELL_I210:
   9999 				new_phytype = WMPHY_I210;
   10000 				break;
   10001 			case MII_MODEL_xxMARVELL_E1011:
   10002 			case MII_MODEL_xxMARVELL_E1000_3:
   10003 			case MII_MODEL_xxMARVELL_E1000_5:
   10004 			case MII_MODEL_xxMARVELL_E1112:
   10005 				new_phytype = WMPHY_M88;
   10006 				break;
   10007 			case MII_MODEL_xxMARVELL_E1149:
   10008 				new_phytype = WMPHY_BM;
   10009 				break;
   10010 			case MII_MODEL_xxMARVELL_E1111:
   10011 			case MII_MODEL_xxMARVELL_I347:
   10012 			case MII_MODEL_xxMARVELL_E1512:
   10013 			case MII_MODEL_xxMARVELL_E1340M:
   10014 			case MII_MODEL_xxMARVELL_E1543:
   10015 				new_phytype = WMPHY_M88;
   10016 				break;
   10017 			case MII_MODEL_xxMARVELL_I82563:
   10018 				new_phytype = WMPHY_GG82563;
   10019 				break;
   10020 			default:
   10021 				break;
   10022 			}
   10023 			break;
   10024 		case MII_OUI_INTEL:
   10025 			switch (phy_model) {
   10026 			case MII_MODEL_INTEL_I82577:
   10027 				new_phytype = WMPHY_82577;
   10028 				break;
   10029 			case MII_MODEL_INTEL_I82579:
   10030 				new_phytype = WMPHY_82579;
   10031 				break;
   10032 			case MII_MODEL_INTEL_I217:
   10033 				new_phytype = WMPHY_I217;
   10034 				break;
   10035 			case MII_MODEL_INTEL_I82580:
   10036 			case MII_MODEL_INTEL_I350:
   10037 				new_phytype = WMPHY_82580;
   10038 				break;
   10039 			default:
   10040 				break;
   10041 			}
   10042 			break;
   10043 		case MII_OUI_yyINTEL:
   10044 			switch (phy_model) {
   10045 			case MII_MODEL_yyINTEL_I82562G:
   10046 			case MII_MODEL_yyINTEL_I82562EM:
   10047 			case MII_MODEL_yyINTEL_I82562ET:
   10048 				new_phytype = WMPHY_IFE;
   10049 				break;
   10050 			case MII_MODEL_yyINTEL_IGP01E1000:
   10051 				new_phytype = WMPHY_IGP;
   10052 				break;
   10053 			case MII_MODEL_yyINTEL_I82566:
   10054 				new_phytype = WMPHY_IGP_3;
   10055 				break;
   10056 			default:
   10057 				break;
   10058 			}
   10059 			break;
   10060 		default:
   10061 			break;
   10062 		}
   10063 		if (new_phytype == WMPHY_UNKNOWN)
   10064 			aprint_verbose_dev(dev,
   10065 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   10066 			    __func__, phy_oui, phy_model);
   10067 
		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. PHY type from PHY ID = %u\n",
			    sc->sc_phytype, new_phytype);
		}
   10074 	}
   10075 
   10076 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10077 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10078 		/* SGMII */
   10079 		new_readreg = wm_sgmii_readreg;
   10080 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   10082 		/* BM2 (phyaddr == 1) */
   10083 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10084 		    && (new_phytype != WMPHY_BM)
   10085 		    && (new_phytype != WMPHY_UNKNOWN))
   10086 			doubt_phytype = new_phytype;
   10087 		new_phytype = WMPHY_BM;
   10088 		new_readreg = wm_gmii_bm_readreg;
   10089 		new_writereg = wm_gmii_bm_writereg;
   10090 	} else if (sc->sc_type >= WM_T_PCH) {
   10091 		/* All PCH* use _hv_ */
   10092 		new_readreg = wm_gmii_hv_readreg;
   10093 		new_writereg = wm_gmii_hv_writereg;
   10094 	} else if (sc->sc_type >= WM_T_ICH8) {
   10095 		/* non-82567 ICH8, 9 and 10 */
   10096 		new_readreg = wm_gmii_i82544_readreg;
   10097 		new_writereg = wm_gmii_i82544_writereg;
   10098 	} else if (sc->sc_type >= WM_T_80003) {
   10099 		/* 80003 */
   10100 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10101 		    && (new_phytype != WMPHY_GG82563)
   10102 		    && (new_phytype != WMPHY_UNKNOWN))
   10103 			doubt_phytype = new_phytype;
   10104 		new_phytype = WMPHY_GG82563;
   10105 		new_readreg = wm_gmii_i80003_readreg;
   10106 		new_writereg = wm_gmii_i80003_writereg;
   10107 	} else if (sc->sc_type >= WM_T_I210) {
   10108 		/* I210 and I211 */
   10109 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10110 		    && (new_phytype != WMPHY_I210)
   10111 		    && (new_phytype != WMPHY_UNKNOWN))
   10112 			doubt_phytype = new_phytype;
   10113 		new_phytype = WMPHY_I210;
   10114 		new_readreg = wm_gmii_gs40g_readreg;
   10115 		new_writereg = wm_gmii_gs40g_writereg;
   10116 	} else if (sc->sc_type >= WM_T_82580) {
   10117 		/* 82580, I350 and I354 */
   10118 		new_readreg = wm_gmii_82580_readreg;
   10119 		new_writereg = wm_gmii_82580_writereg;
   10120 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10122 		new_readreg = wm_gmii_i82544_readreg;
   10123 		new_writereg = wm_gmii_i82544_writereg;
   10124 	} else {
   10125 		new_readreg = wm_gmii_i82543_readreg;
   10126 		new_writereg = wm_gmii_i82543_writereg;
   10127 	}
   10128 
   10129 	if (new_phytype == WMPHY_BM) {
   10130 		/* All BM use _bm_ */
   10131 		new_readreg = wm_gmii_bm_readreg;
   10132 		new_writereg = wm_gmii_bm_writereg;
   10133 	}
   10134 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10135 		/* All PCH* use _hv_ */
   10136 		new_readreg = wm_gmii_hv_readreg;
   10137 		new_writereg = wm_gmii_hv_writereg;
   10138 	}
   10139 
   10140 	/* Diag output */
   10141 	if (doubt_phytype != WMPHY_UNKNOWN)
   10142 		aprint_error_dev(dev, "Assumed new PHY type was "
   10143 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10144 		    new_phytype);
	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type "
		    "(%u) was incorrect. New PHY type = %u\n",
		    sc->sc_phytype, new_phytype);
   10150 
   10151 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   10152 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   10153 
   10154 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   10155 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   10156 		    "function was incorrect.\n");
   10157 
   10158 	/* Update now */
   10159 	sc->sc_phytype = new_phytype;
   10160 	mii->mii_readreg = new_readreg;
   10161 	mii->mii_writereg = new_writereg;
   10162 	if (new_readreg == wm_gmii_hv_readreg) {
   10163 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10164 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10165 	} else if (new_readreg == wm_sgmii_readreg) {
   10166 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10167 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10168 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10169 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10170 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10171 	}
   10172 }
   10173 
   10174 /*
   10175  * wm_get_phy_id_82575:
   10176  *
 * Return the PHY ID, or -1 on failure.
   10178  */
   10179 static int
   10180 wm_get_phy_id_82575(struct wm_softc *sc)
   10181 {
   10182 	uint32_t reg;
   10183 	int phyid = -1;
   10184 
   10185 	/* XXX */
   10186 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10187 		return -1;
   10188 
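	/*
	 * When the SGMII link is managed over MDIO, the external PHY's
	 * address is taken from the MDIC (82575/82576) or MDICNFG (82580
	 * and later) register; on the newer chips that field is presumably
	 * loaded from the NVM at reset.
	 */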
   10189 	if (wm_sgmii_uses_mdio(sc)) {
   10190 		switch (sc->sc_type) {
   10191 		case WM_T_82575:
   10192 		case WM_T_82576:
   10193 			reg = CSR_READ(sc, WMREG_MDIC);
   10194 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10195 			break;
   10196 		case WM_T_82580:
   10197 		case WM_T_I350:
   10198 		case WM_T_I354:
   10199 		case WM_T_I210:
   10200 		case WM_T_I211:
   10201 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10202 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10203 			break;
   10204 		default:
   10205 			return -1;
   10206 		}
   10207 	}
   10208 
   10209 	return phyid;
   10210 }
   10211 
   10213 /*
   10214  * wm_gmii_mediainit:
   10215  *
   10216  *	Initialize media for use on 1000BASE-T devices.
   10217  */
   10218 static void
   10219 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10220 {
   10221 	device_t dev = sc->sc_dev;
   10222 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10223 	struct mii_data *mii = &sc->sc_mii;
   10224 	uint32_t reg;
   10225 
   10226 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10227 		device_xname(sc->sc_dev), __func__));
   10228 
   10229 	/* We have GMII. */
   10230 	sc->sc_flags |= WM_F_HAS_MII;
   10231 
   10232 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10234 	else
   10235 		sc->sc_tipg = TIPG_1000T_DFLT;
   10236 
   10237 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10238 	if ((sc->sc_type == WM_T_82580)
   10239 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10240 	    || (sc->sc_type == WM_T_I211)) {
   10241 		reg = CSR_READ(sc, WMREG_PHPM);
   10242 		reg &= ~PHPM_GO_LINK_D;
   10243 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10244 	}
   10245 
   10246 	/*
   10247 	 * Let the chip set speed/duplex on its own based on
   10248 	 * signals from the PHY.
   10249 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10250 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10251 	 */
   10252 	sc->sc_ctrl |= CTRL_SLU;
   10253 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10254 
   10255 	/* Initialize our media structures and probe the GMII. */
   10256 	mii->mii_ifp = ifp;
   10257 
   10258 	mii->mii_statchg = wm_gmii_statchg;
   10259 
   10260 	/* get PHY control from SMBus to PCIe */
   10261 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10262 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10263 	    || (sc->sc_type == WM_T_PCH_CNP))
   10264 		wm_init_phy_workarounds_pchlan(sc);
   10265 
   10266 	wm_gmii_reset(sc);
   10267 
   10268 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10269 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10270 	    wm_gmii_mediastatus);
   10271 
   10272 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10273 	    || (sc->sc_type == WM_T_82580)
   10274 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10275 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10276 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10277 			/* Attach only one port */
   10278 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10279 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10280 		} else {
   10281 			int i, id;
   10282 			uint32_t ctrl_ext;
   10283 
   10284 			id = wm_get_phy_id_82575(sc);
   10285 			if (id != -1) {
   10286 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10287 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10288 			}
   10289 			if ((id == -1)
   10290 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   10292 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10293 				CSR_WRITE(sc, WMREG_CTRL_EXT,
				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   10295 				CSR_WRITE_FLUSH(sc);
   10296 				delay(300*1000); /* XXX too long */
   10297 
				/* Try PHY addresses 1 through 7 */
   10299 				for (i = 1; i < 8; i++)
   10300 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10301 					    0xffffffff, i, MII_OFFSET_ANY,
   10302 					    MIIF_DOPAUSE);
   10303 
   10304 				/* Restore previous sfp cage power state */
   10305 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10306 			}
   10307 		}
   10308 	} else
   10309 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10310 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10311 
   10312 	/*
   10313 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10314 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10315 	 */
   10316 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10317 		|| (sc->sc_type == WM_T_PCH_SPT)
   10318 		|| (sc->sc_type == WM_T_PCH_CNP))
   10319 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10320 		wm_set_mdio_slow_mode_hv(sc);
   10321 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10322 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10323 	}
   10324 
   10325 	/*
   10326 	 * (For ICH8 variants)
   10327 	 * If PHY detection failed, use BM's r/w function and retry.
   10328 	 */
   10329 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10330 		/* if failed, retry with *_bm_* */
   10331 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10332 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10333 		    sc->sc_phytype);
   10334 		sc->sc_phytype = WMPHY_BM;
   10335 		mii->mii_readreg = wm_gmii_bm_readreg;
   10336 		mii->mii_writereg = wm_gmii_bm_writereg;
   10337 
   10338 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10339 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10340 	}
   10341 
   10342 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   10344 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10345 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10346 		sc->sc_phytype = WMPHY_NONE;
   10347 	} else {
   10348 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10349 
		/*
		 * A PHY was found. Check the PHY type again with the second
		 * call of wm_gmii_setup_phytype().
		 */
   10354 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10355 		    child->mii_mpd_model);
   10356 
   10357 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10358 	}
   10359 }
   10360 
   10361 /*
   10362  * wm_gmii_mediachange:	[ifmedia interface function]
   10363  *
   10364  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10365  */
   10366 static int
   10367 wm_gmii_mediachange(struct ifnet *ifp)
   10368 {
   10369 	struct wm_softc *sc = ifp->if_softc;
   10370 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10371 	int rc;
   10372 
   10373 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10374 		device_xname(sc->sc_dev), __func__));
   10375 	if ((ifp->if_flags & IFF_UP) == 0)
   10376 		return 0;
   10377 
   10378 	/* Disable D0 LPLU. */
   10379 	wm_lplu_d0_disable(sc);
   10380 
   10381 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10382 	sc->sc_ctrl |= CTRL_SLU;
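	/*
	 * On autoselect, or on anything newer than the 82543, clear the
	 * force bits and let the MAC follow the PHY; otherwise force the
	 * speed and duplex that the selected media calls for.
	 */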
   10383 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10384 	    || (sc->sc_type > WM_T_82543)) {
   10385 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10386 	} else {
   10387 		sc->sc_ctrl &= ~CTRL_ASDE;
   10388 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10389 		if (ife->ifm_media & IFM_FDX)
   10390 			sc->sc_ctrl |= CTRL_FD;
   10391 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10392 		case IFM_10_T:
   10393 			sc->sc_ctrl |= CTRL_SPEED_10;
   10394 			break;
   10395 		case IFM_100_TX:
   10396 			sc->sc_ctrl |= CTRL_SPEED_100;
   10397 			break;
   10398 		case IFM_1000_T:
   10399 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10400 			break;
   10401 		case IFM_NONE:
   10402 			/* There is no specific setting for IFM_NONE */
   10403 			break;
   10404 		default:
   10405 			panic("wm_gmii_mediachange: bad media 0x%x",
   10406 			    ife->ifm_media);
   10407 		}
   10408 	}
   10409 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10410 	CSR_WRITE_FLUSH(sc);
   10411 	if (sc->sc_type <= WM_T_82543)
   10412 		wm_gmii_reset(sc);
   10413 
   10414 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10415 		return 0;
   10416 	return rc;
   10417 }
   10418 
   10419 /*
   10420  * wm_gmii_mediastatus:	[ifmedia interface function]
   10421  *
   10422  *	Get the current interface media status on a 1000BASE-T device.
   10423  */
   10424 static void
   10425 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10426 {
   10427 	struct wm_softc *sc = ifp->if_softc;
   10428 
   10429 	ether_mediastatus(ifp, ifmr);
   10430 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10431 	    | sc->sc_flowflags;
   10432 }
   10433 
   10434 #define	MDI_IO		CTRL_SWDPIN(2)
   10435 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10436 #define	MDI_CLK		CTRL_SWDPIN(3)
   10437 
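/*
 * On the 82543 the MII management interface is bit-banged through the
 * software-definable pins: MDI_IO carries the data, MDI_CLK supplies the
 * clock, and MDI_DIR selects whether the host or the PHY drives MDI_IO.
 */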
   10438 static void
   10439 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10440 {
   10441 	uint32_t i, v;
   10442 
   10443 	v = CSR_READ(sc, WMREG_CTRL);
   10444 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10445 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10446 
   10447 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10448 		if (data & i)
   10449 			v |= MDI_IO;
   10450 		else
   10451 			v &= ~MDI_IO;
   10452 		CSR_WRITE(sc, WMREG_CTRL, v);
   10453 		CSR_WRITE_FLUSH(sc);
   10454 		delay(10);
   10455 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10456 		CSR_WRITE_FLUSH(sc);
   10457 		delay(10);
   10458 		CSR_WRITE(sc, WMREG_CTRL, v);
   10459 		CSR_WRITE_FLUSH(sc);
   10460 		delay(10);
   10461 	}
   10462 }
   10463 
   10464 static uint16_t
   10465 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10466 {
   10467 	uint32_t v, i;
   10468 	uint16_t data = 0;
   10469 
   10470 	v = CSR_READ(sc, WMREG_CTRL);
   10471 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10472 	v |= CTRL_SWDPIO(3);
   10473 
   10474 	CSR_WRITE(sc, WMREG_CTRL, v);
   10475 	CSR_WRITE_FLUSH(sc);
   10476 	delay(10);
   10477 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10478 	CSR_WRITE_FLUSH(sc);
   10479 	delay(10);
   10480 	CSR_WRITE(sc, WMREG_CTRL, v);
   10481 	CSR_WRITE_FLUSH(sc);
   10482 	delay(10);
   10483 
   10484 	for (i = 0; i < 16; i++) {
   10485 		data <<= 1;
   10486 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10487 		CSR_WRITE_FLUSH(sc);
   10488 		delay(10);
   10489 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10490 			data |= 1;
   10491 		CSR_WRITE(sc, WMREG_CTRL, v);
   10492 		CSR_WRITE_FLUSH(sc);
   10493 		delay(10);
   10494 	}
   10495 
   10496 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10497 	CSR_WRITE_FLUSH(sc);
   10498 	delay(10);
   10499 	CSR_WRITE(sc, WMREG_CTRL, v);
   10500 	CSR_WRITE_FLUSH(sc);
   10501 	delay(10);
   10502 
   10503 	return data;
   10504 }
   10505 
   10506 #undef MDI_IO
   10507 #undef MDI_DIR
   10508 #undef MDI_CLK
   10509 
   10510 /*
   10511  * wm_gmii_i82543_readreg:	[mii interface function]
   10512  *
   10513  *	Read a PHY register on the GMII (i82543 version).
   10514  */
   10515 static int
   10516 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10517 {
   10518 	struct wm_softc *sc = device_private(dev);
   10519 
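	/*
	 * Clock out the 32-bit preamble of all ones, then the 14 leading
	 * bits of the read frame: start (2 bits), read opcode (2), PHY
	 * address (5) and register address (5). The turnaround and the 16
	 * data bits are then clocked in by wm_i82543_mii_recvbits().
	 */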
   10520 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10521 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10522 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10523 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10524 
   10525 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10526 		device_xname(dev), phy, reg, *val));
   10527 
   10528 	return 0;
   10529 }
   10530 
   10531 /*
   10532  * wm_gmii_i82543_writereg:	[mii interface function]
   10533  *
   10534  *	Write a PHY register on the GMII (i82543 version).
   10535  */
   10536 static int
   10537 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10538 {
   10539 	struct wm_softc *sc = device_private(dev);
   10540 
   10541 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10542 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10543 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10544 	    (MII_COMMAND_START << 30), 32);
   10545 
   10546 	return 0;
   10547 }
   10548 
   10549 /*
   10550  * wm_gmii_mdic_readreg:	[mii interface function]
   10551  *
   10552  *	Read a PHY register on the GMII.
   10553  */
   10554 static int
   10555 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10556 {
   10557 	struct wm_softc *sc = device_private(dev);
   10558 	uint32_t mdic = 0;
   10559 	int i;
   10560 
   10561 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10562 	    && (reg > MII_ADDRMASK)) {
   10563 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10564 		    __func__, sc->sc_phytype, reg);
   10565 		reg &= MII_ADDRMASK;
   10566 	}
   10567 
   10568 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10569 	    MDIC_REGADD(reg));
   10570 
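	/* Poll MDIC_READY every 50us, up to WM_GEN_POLL_TIMEOUT * 3 times. */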
   10571 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10572 		delay(50);
   10573 		mdic = CSR_READ(sc, WMREG_MDIC);
   10574 		if (mdic & MDIC_READY)
   10575 			break;
   10576 	}
   10577 
   10578 	if ((mdic & MDIC_READY) == 0) {
   10579 		DPRINTF(WM_DEBUG_GMII,
   10580 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10581 			device_xname(dev), phy, reg));
   10582 		return ETIMEDOUT;
   10583 	} else if (mdic & MDIC_E) {
   10584 		/* This is normal if no PHY is present. */
   10585 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10586 			device_xname(sc->sc_dev), phy, reg));
   10587 		return -1;
   10588 	} else
   10589 		*val = MDIC_DATA(mdic);
   10590 
   10591 	/*
   10592 	 * Allow some time after each MDIC transaction to avoid
   10593 	 * reading duplicate data in the next MDIC transaction.
   10594 	 */
   10595 	if (sc->sc_type == WM_T_PCH2)
   10596 		delay(100);
   10597 
   10598 	return 0;
   10599 }
   10600 
   10601 /*
   10602  * wm_gmii_mdic_writereg:	[mii interface function]
   10603  *
   10604  *	Write a PHY register on the GMII.
   10605  */
   10606 static int
   10607 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10608 {
   10609 	struct wm_softc *sc = device_private(dev);
   10610 	uint32_t mdic = 0;
   10611 	int i;
   10612 
   10613 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10614 	    && (reg > MII_ADDRMASK)) {
   10615 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10616 		    __func__, sc->sc_phytype, reg);
   10617 		reg &= MII_ADDRMASK;
   10618 	}
   10619 
   10620 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10621 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10622 
   10623 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10624 		delay(50);
   10625 		mdic = CSR_READ(sc, WMREG_MDIC);
   10626 		if (mdic & MDIC_READY)
   10627 			break;
   10628 	}
   10629 
   10630 	if ((mdic & MDIC_READY) == 0) {
   10631 		DPRINTF(WM_DEBUG_GMII,
   10632 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10633 			device_xname(dev), phy, reg));
   10634 		return ETIMEDOUT;
   10635 	} else if (mdic & MDIC_E) {
   10636 		DPRINTF(WM_DEBUG_GMII,
   10637 		    ("%s: MDIC write error: phy %d reg %d\n",
   10638 			device_xname(dev), phy, reg));
   10639 		return -1;
   10640 	}
   10641 
   10642 	/*
   10643 	 * Allow some time after each MDIC transaction to avoid
   10644 	 * reading duplicate data in the next MDIC transaction.
   10645 	 */
   10646 	if (sc->sc_type == WM_T_PCH2)
   10647 		delay(100);
   10648 
   10649 	return 0;
   10650 }
   10651 
   10652 /*
   10653  * wm_gmii_i82544_readreg:	[mii interface function]
   10654  *
   10655  *	Read a PHY register on the GMII.
   10656  */
   10657 static int
   10658 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10659 {
   10660 	struct wm_softc *sc = device_private(dev);
   10661 	int rv;
   10662 
   10663 	if (sc->phy.acquire(sc)) {
   10664 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10665 		return -1;
   10666 	}
   10667 
   10668 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10669 
   10670 	sc->phy.release(sc);
   10671 
   10672 	return rv;
   10673 }
   10674 
   10675 static int
   10676 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10677 {
   10678 	struct wm_softc *sc = device_private(dev);
   10679 	int rv;
   10680 
   10681 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10682 		switch (sc->sc_phytype) {
   10683 		case WMPHY_IGP:
   10684 		case WMPHY_IGP_2:
   10685 		case WMPHY_IGP_3:
   10686 			rv = wm_gmii_mdic_writereg(dev, phy,
   10687 			    MII_IGPHY_PAGE_SELECT, reg);
   10688 			if (rv != 0)
   10689 				return rv;
   10690 			break;
   10691 		default:
   10692 #ifdef WM_DEBUG
   10693 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10694 			    __func__, sc->sc_phytype, reg);
   10695 #endif
   10696 			break;
   10697 		}
   10698 	}
   10699 
   10700 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10701 }
   10702 
   10703 /*
   10704  * wm_gmii_i82544_writereg:	[mii interface function]
   10705  *
   10706  *	Write a PHY register on the GMII.
   10707  */
   10708 static int
   10709 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10710 {
   10711 	struct wm_softc *sc = device_private(dev);
   10712 	int rv;
   10713 
   10714 	if (sc->phy.acquire(sc)) {
   10715 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10716 		return -1;
   10717 	}
   10718 
   10719 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10720 	sc->phy.release(sc);
   10721 
   10722 	return rv;
   10723 }
   10724 
   10725 static int
   10726 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10727 {
   10728 	struct wm_softc *sc = device_private(dev);
   10729 	int rv;
   10730 
   10731 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10732 		switch (sc->sc_phytype) {
   10733 		case WMPHY_IGP:
   10734 		case WMPHY_IGP_2:
   10735 		case WMPHY_IGP_3:
   10736 			rv = wm_gmii_mdic_writereg(dev, phy,
   10737 			    MII_IGPHY_PAGE_SELECT, reg);
   10738 			if (rv != 0)
   10739 				return rv;
   10740 			break;
   10741 		default:
   10742 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
			    __func__, sc->sc_phytype, reg);
   10745 #endif
   10746 			break;
   10747 		}
   10748 	}
   10749 
   10750 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10751 }
   10752 
   10753 /*
   10754  * wm_gmii_i80003_readreg:	[mii interface function]
   10755  *
 *	Read a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10759  */
   10760 static int
   10761 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10762 {
   10763 	struct wm_softc *sc = device_private(dev);
   10764 	int page_select;
   10765 	uint16_t temp, temp2;
   10766 	int rv = 0;
   10767 
   10768 	if (phy != 1) /* Only one PHY on kumeran bus */
   10769 		return -1;
   10770 
   10771 	if (sc->phy.acquire(sc)) {
   10772 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10773 		return -1;
   10774 	}
   10775 
   10776 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10777 		page_select = GG82563_PHY_PAGE_SELECT;
   10778 	else {
   10779 		/*
   10780 		 * Use Alternative Page Select register to access registers
   10781 		 * 30 and 31.
   10782 		 */
   10783 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10784 	}
   10785 	temp = reg >> GG82563_PAGE_SHIFT;
   10786 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10787 		goto out;
   10788 
   10789 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug with the ready
		 * bit in the MDIC register.
		 */
   10794 		delay(200);
   10795 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10796 		if ((rv != 0) || (temp2 != temp)) {
   10797 			device_printf(dev, "%s failed\n", __func__);
   10798 			rv = -1;
   10799 			goto out;
   10800 		}
   10801 		delay(200);
   10802 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10803 		delay(200);
   10804 	} else
   10805 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10806 
   10807 out:
   10808 	sc->phy.release(sc);
   10809 	return rv;
   10810 }
   10811 
   10812 /*
   10813  * wm_gmii_i80003_writereg:	[mii interface function]
   10814  *
 *	Write a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10818  */
   10819 static int
   10820 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10821 {
   10822 	struct wm_softc *sc = device_private(dev);
   10823 	int page_select, rv;
   10824 	uint16_t temp, temp2;
   10825 
   10826 	if (phy != 1) /* Only one PHY on kumeran bus */
   10827 		return -1;
   10828 
   10829 	if (sc->phy.acquire(sc)) {
   10830 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10831 		return -1;
   10832 	}
   10833 
   10834 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10835 		page_select = GG82563_PHY_PAGE_SELECT;
   10836 	else {
   10837 		/*
   10838 		 * Use Alternative Page Select register to access registers
   10839 		 * 30 and 31.
   10840 		 */
   10841 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10842 	}
   10843 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10844 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10845 		goto out;
   10846 
   10847 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
		/*
		 * Wait an extra 200us to work around a bug with the ready
		 * bit in the MDIC register.
		 */
   10852 		delay(200);
   10853 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10854 		if ((rv != 0) || (temp2 != temp)) {
   10855 			device_printf(dev, "%s failed\n", __func__);
   10856 			rv = -1;
   10857 			goto out;
   10858 		}
   10859 		delay(200);
   10860 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10861 		delay(200);
   10862 	} else
   10863 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10864 
   10865 out:
   10866 	sc->phy.release(sc);
   10867 	return rv;
   10868 }
   10869 
   10870 /*
   10871  * wm_gmii_bm_readreg:	[mii interface function]
   10872  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10876  */
   10877 static int
   10878 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10879 {
   10880 	struct wm_softc *sc = device_private(dev);
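	/*
	 * "reg" encodes the page number in its upper bits; the low bits
	 * select the register within the page.
	 */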
   10881 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10882 	int rv;
   10883 
   10884 	if (sc->phy.acquire(sc)) {
   10885 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10886 		return -1;
   10887 	}
   10888 
   10889 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10890 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10891 		    || (reg == 31)) ? 1 : phy;
   10892 	/* Page 800 works differently than the rest so it has its own func */
   10893 	if (page == BM_WUC_PAGE) {
   10894 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10895 		goto release;
   10896 	}
   10897 
   10898 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10899 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10900 		    && (sc->sc_type != WM_T_82583))
   10901 			rv = wm_gmii_mdic_writereg(dev, phy,
   10902 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10903 		else
   10904 			rv = wm_gmii_mdic_writereg(dev, phy,
   10905 			    BME1000_PHY_PAGE_SELECT, page);
   10906 		if (rv != 0)
   10907 			goto release;
   10908 	}
   10909 
   10910 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10911 
   10912 release:
   10913 	sc->phy.release(sc);
   10914 	return rv;
   10915 }
   10916 
   10917 /*
   10918  * wm_gmii_bm_writereg:	[mii interface function]
   10919  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10923  */
   10924 static int
   10925 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10926 {
   10927 	struct wm_softc *sc = device_private(dev);
   10928 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10929 	int rv;
   10930 
   10931 	if (sc->phy.acquire(sc)) {
   10932 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10933 		return -1;
   10934 	}
   10935 
   10936 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10937 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10938 		    || (reg == 31)) ? 1 : phy;
   10939 	/* Page 800 works differently than the rest so it has its own func */
   10940 	if (page == BM_WUC_PAGE) {
   10941 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10942 		goto release;
   10943 	}
   10944 
   10945 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10946 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10947 		    && (sc->sc_type != WM_T_82583))
   10948 			rv = wm_gmii_mdic_writereg(dev, phy,
   10949 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10950 		else
   10951 			rv = wm_gmii_mdic_writereg(dev, phy,
   10952 			    BME1000_PHY_PAGE_SELECT, page);
   10953 		if (rv != 0)
   10954 			goto release;
   10955 	}
   10956 
   10957 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10958 
   10959 release:
   10960 	sc->phy.release(sc);
   10961 	return rv;
   10962 }
   10963 
   10964 /*
   10965  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10966  *  @dev: pointer to the HW structure
   10967  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10968  *
   10969  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10970  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10971  */
   10972 static int
   10973 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10974 {
   10975 	uint16_t temp;
   10976 	int rv;
   10977 
   10978 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10979 		device_xname(dev), __func__));
   10980 
   10981 	if (!phy_regp)
   10982 		return -1;
   10983 
   10984 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10985 
   10986 	/* Select Port Control Registers page */
   10987 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10988 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10989 	if (rv != 0)
   10990 		return rv;
   10991 
   10992 	/* Read WUCE and save it */
   10993 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10994 	if (rv != 0)
   10995 		return rv;
   10996 
   10997 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10998 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10999 	 */
   11000 	temp = *phy_regp;
   11001 	temp |= BM_WUC_ENABLE_BIT;
   11002 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11003 
   11004 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11005 		return rv;
   11006 
   11007 	/* Select Host Wakeup Registers page - caller now able to write
   11008 	 * registers on the Wakeup registers page
   11009 	 */
   11010 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11011 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11012 }
   11013 
   11014 /*
   11015  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11016  *  @dev: pointer to the HW structure
   11017  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11018  *
   11019  *  Restore BM_WUC_ENABLE_REG to its original value.
   11020  *
   11021  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11022  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11023  *  caller.
   11024  */
   11025 static int
   11026 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11027 {
   11028 
   11029 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11030 		device_xname(dev), __func__));
   11031 
   11032 	if (!phy_regp)
   11033 		return -1;
   11034 
   11035 	/* Select Port Control Registers page */
   11036 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11037 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11038 
   11039 	/* Restore 769.17 to its original value */
   11040 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11041 
   11042 	return 0;
   11043 }
   11044 
   11045 /*
   11046  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
 *  @dev: pointer to the device
   11048  *  @offset: register offset to be read or written
   11049  *  @val: pointer to the data to read or write
   11050  *  @rd: determines if operation is read or write
   11051  *  @page_set: BM_WUC_PAGE already set and access enabled
   11052  *
   11053  *  Read the PHY register at offset and store the retrieved information in
   11054  *  data, or write data to PHY register at offset.  Note the procedure to
   11055  *  access the PHY wakeup registers is different than reading the other PHY
   11056  *  registers. It works as such:
   11057  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
 *  2) Set page to 800 for the host (801 for the manageability unit)
   11059  *  3) Write the address using the address opcode (0x11)
   11060  *  4) Read or write the data using the data opcode (0x12)
   11061  *  5) Restore 769.17.2 to its original value
   11062  *
   11063  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11064  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11065  *
 *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
 *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
 *  is responsible for the calls to
 *  wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11069  */
   11070 static int
wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11072 	bool page_set)
   11073 {
   11074 	struct wm_softc *sc = device_private(dev);
   11075 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11076 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11077 	uint16_t wuce;
   11078 	int rv = 0;
   11079 
   11080 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11081 		device_xname(dev), __func__));
   11082 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11083 	if ((sc->sc_type == WM_T_PCH)
   11084 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11085 		device_printf(dev,
   11086 		    "Attempting to access page %d while gig enabled.\n", page);
   11087 	}
   11088 
   11089 	if (!page_set) {
   11090 		/* Enable access to PHY wakeup registers */
   11091 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11092 		if (rv != 0) {
   11093 			device_printf(dev,
   11094 			    "%s: Could not enable PHY wakeup reg access\n",
   11095 			    __func__);
   11096 			return rv;
   11097 		}
   11098 	}
   11099 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11100 		device_xname(sc->sc_dev), __func__, page, regnum));
   11101 
   11102 	/*
   11103 	 * 2) Access PHY wakeup register.
   11104 	 * See wm_access_phy_wakeup_reg_bm.
   11105 	 */
   11106 
   11107 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11108 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11109 	if (rv != 0)
   11110 		return rv;
   11111 
   11112 	if (rd) {
   11113 		/* Read the Wakeup register page value using opcode 0x12 */
   11114 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11115 	} else {
   11116 		/* Write the Wakeup register page value using opcode 0x12 */
   11117 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11118 	}
   11119 	if (rv != 0)
   11120 		return rv;
   11121 
   11122 	if (!page_set)
   11123 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11124 
   11125 	return rv;
   11126 }
   11127 
   11128 /*
   11129  * wm_gmii_hv_readreg:	[mii interface function]
   11130  *
 *	Read a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11134  */
   11135 static int
   11136 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11137 {
   11138 	struct wm_softc *sc = device_private(dev);
   11139 	int rv;
   11140 
   11141 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11142 		device_xname(dev), __func__));
   11143 	if (sc->phy.acquire(sc)) {
   11144 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11145 		return -1;
   11146 	}
   11147 
   11148 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11149 	sc->phy.release(sc);
   11150 	return rv;
   11151 }
   11152 
   11153 static int
   11154 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11155 {
   11156 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11157 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11158 	int rv;
   11159 
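         	/* Registers on pages >= 768 are only reachable at PHY address 1 */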
   11160 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11161 
   11162 	/* Page 800 works differently than the rest so it has its own func */
   11163 	if (page == BM_WUC_PAGE)
   11164 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11165 
   11166 	/*
   11167 	 * Lower than page 768 works differently than the rest so it has its
   11168 	 * own func
   11169 	 */
   11170 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11171 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11172 		return -1;
   11173 	}
   11174 
   11175 	/*
   11176 	 * XXX I21[789] documents say that the SMBus Address register is at
   11177 	 * PHY address 01, Page 0 (not 768), Register 26.
   11178 	 */
   11179 	if (page == HV_INTC_FC_PAGE_START)
   11180 		page = 0;
   11181 
   11182 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11183 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11184 		    page << BME1000_PAGE_SHIFT);
   11185 		if (rv != 0)
   11186 			return rv;
   11187 	}
   11188 
   11189 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11190 }
   11191 
   11192 /*
   11193  * wm_gmii_hv_writereg:	[mii interface function]
   11194  *
   11195  *	Write a PHY register on the kumeran.
   11196  * This could be handled by the PHY layer if we didn't have to lock the
   11197  * resource ...
   11198  */
   11199 static int
   11200 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11201 {
   11202 	struct wm_softc *sc = device_private(dev);
   11203 	int rv;
   11204 
   11205 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11206 		device_xname(dev), __func__));
   11207 
   11208 	if (sc->phy.acquire(sc)) {
   11209 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11210 		return -1;
   11211 	}
   11212 
   11213 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11214 	sc->phy.release(sc);
   11215 
   11216 	return rv;
   11217 }
   11218 
   11219 static int
   11220 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11221 {
   11222 	struct wm_softc *sc = device_private(dev);
   11223 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11224 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11225 	int rv;
   11226 
   11227 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11228 
   11229 	/* Page 800 works differently than the rest so it has its own func */
   11230 	if (page == BM_WUC_PAGE)
   11231 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11232 		    false);
   11233 
   11234 	/*
   11235 	 * Lower than page 768 works differently than the rest so it has its
   11236 	 * own func
   11237 	 */
   11238 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11239 		device_printf(dev, "%s: unsupported page %d\n", __func__, page);
   11240 		return -1;
   11241 	}
   11242 
   11243 	{
   11244 		/*
   11245 		 * XXX I21[789] documents say that the SMBus Address register
   11246 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11247 		 */
   11248 		if (page == HV_INTC_FC_PAGE_START)
   11249 			page = 0;
   11250 
   11251 		/*
   11252 		 * XXX Work around MDIO accesses being disabled after
   11253 		 * entering IEEE Power Down (whenever bit 11 of the PHY
   11254 		 * control register is set).
   11255 		 */
   11256 		if (sc->sc_phytype == WMPHY_82578) {
   11257 			struct mii_softc *child;
   11258 
   11259 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11260 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11261 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11262 			    && ((val & (1 << 11)) != 0)) {
   11263 				device_printf(dev, "XXX need workaround\n");
   11264 			}
   11265 		}
   11266 
   11267 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11268 			rv = wm_gmii_mdic_writereg(dev, 1,
   11269 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11270 			if (rv != 0)
   11271 				return rv;
   11272 		}
   11273 	}
   11274 
   11275 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11276 }
   11277 
   11278 /*
   11279  * wm_gmii_82580_readreg:	[mii interface function]
   11280  *
   11281  *	Read a PHY register on the 82580 and I350.
   11282  * This could be handled by the PHY layer if we didn't have to lock the
   11283  * resource ...
   11284  */
   11285 static int
   11286 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11287 {
   11288 	struct wm_softc *sc = device_private(dev);
   11289 	int rv;
   11290 
   11291 	if (sc->phy.acquire(sc) != 0) {
   11292 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11293 		return -1;
   11294 	}
   11295 
   11296 #ifdef DIAGNOSTIC
   11297 	if (reg > MII_ADDRMASK) {
   11298 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11299 		    __func__, sc->sc_phytype, reg);
   11300 		reg &= MII_ADDRMASK;
   11301 	}
   11302 #endif
   11303 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11304 
   11305 	sc->phy.release(sc);
   11306 	return rv;
   11307 }
   11308 
   11309 /*
   11310  * wm_gmii_82580_writereg:	[mii interface function]
   11311  *
   11312  *	Write a PHY register on the 82580 and I350.
   11313  * This could be handled by the PHY layer if we didn't have to lock the
   11314  * resource ...
   11315  */
   11316 static int
   11317 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11318 {
   11319 	struct wm_softc *sc = device_private(dev);
   11320 	int rv;
   11321 
   11322 	if (sc->phy.acquire(sc) != 0) {
   11323 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11324 		return -1;
   11325 	}
   11326 
   11327 #ifdef DIAGNOSTIC
   11328 	if (reg > MII_ADDRMASK) {
   11329 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11330 		    __func__, sc->sc_phytype, reg);
   11331 		reg &= MII_ADDRMASK;
   11332 	}
   11333 #endif
   11334 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11335 
   11336 	sc->phy.release(sc);
   11337 	return rv;
   11338 }
   11339 
   11340 /*
   11341  * wm_gmii_gs40g_readreg:	[mii interface function]
   11342  *
   11343  *	Read a PHY register on the I210 and I211.
   11344  * This could be handled by the PHY layer if we didn't have to lock the
   11345  * resource ...
   11346  */
   11347 static int
   11348 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11349 {
   11350 	struct wm_softc *sc = device_private(dev);
   11351 	int page, offset;
   11352 	int rv;
   11353 
   11354 	/* Acquire semaphore */
   11355 	if (sc->phy.acquire(sc)) {
   11356 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11357 		return -1;
   11358 	}
   11359 
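         	/*
         	 * GS40G register arguments encode the page in the upper bits
         	 * and the register offset in the lower bits; split them apart
         	 * again before talking to the MDIC interface.
         	 */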
   11360 	/* Page select */
   11361 	page = reg >> GS40G_PAGE_SHIFT;
   11362 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11363 	if (rv != 0)
   11364 		goto release;
   11365 
   11366 	/* Read reg */
   11367 	offset = reg & GS40G_OFFSET_MASK;
   11368 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11369 
   11370 release:
   11371 	sc->phy.release(sc);
   11372 	return rv;
   11373 }
   11374 
   11375 /*
   11376  * wm_gmii_gs40g_writereg:	[mii interface function]
   11377  *
   11378  *	Write a PHY register on the I210 and I211.
   11379  * This could be handled by the PHY layer if we didn't have to lock the
   11380  * resource ...
   11381  */
   11382 static int
   11383 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11384 {
   11385 	struct wm_softc *sc = device_private(dev);
   11386 	uint16_t page;
   11387 	int offset, rv;
   11388 
   11389 	/* Acquire semaphore */
   11390 	if (sc->phy.acquire(sc)) {
   11391 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11392 		return -1;
   11393 	}
   11394 
   11395 	/* Page select */
   11396 	page = reg >> GS40G_PAGE_SHIFT;
   11397 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11398 	if (rv != 0)
   11399 		goto release;
   11400 
   11401 	/* Write reg */
   11402 	offset = reg & GS40G_OFFSET_MASK;
   11403 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11404 
   11405 release:
   11406 	/* Release semaphore */
   11407 	sc->phy.release(sc);
   11408 	return rv;
   11409 }
   11410 
   11411 /*
   11412  * wm_gmii_statchg:	[mii interface function]
   11413  *
   11414  *	Callback from MII layer when media changes.
   11415  */
   11416 static void
   11417 wm_gmii_statchg(struct ifnet *ifp)
   11418 {
   11419 	struct wm_softc *sc = ifp->if_softc;
   11420 	struct mii_data *mii = &sc->sc_mii;
   11421 
   11422 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11423 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11424 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11425 
   11426 	/* Get flow control negotiation result. */
   11427 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11428 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11429 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11430 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11431 	}
   11432 
   11433 	if (sc->sc_flowflags & IFM_FLOW) {
   11434 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11435 			sc->sc_ctrl |= CTRL_TFCE;
   11436 			sc->sc_fcrtl |= FCRTL_XONE;
   11437 		}
   11438 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11439 			sc->sc_ctrl |= CTRL_RFCE;
   11440 	}
   11441 
   11442 	if (mii->mii_media_active & IFM_FDX) {
   11443 		DPRINTF(WM_DEBUG_LINK,
   11444 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11445 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11446 	} else {
   11447 		DPRINTF(WM_DEBUG_LINK,
   11448 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11449 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11450 	}
   11451 
   11452 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11453 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11454 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11455 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11456 	if (sc->sc_type == WM_T_80003) {
   11457 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11458 		case IFM_1000_T:
   11459 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11460 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11461 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11462 			break;
   11463 		default:
   11464 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11465 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11466 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11467 			break;
   11468 		}
   11469 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11470 	}
   11471 }
   11472 
   11473 /* kumeran related (80003, ICH* and PCH*) */
   11474 
   11475 /*
   11476  * wm_kmrn_readreg:
   11477  *
   11478  *	Read a kumeran register
   11479  */
   11480 static int
   11481 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11482 {
   11483 	int rv;
   11484 
   11485 	if (sc->sc_type == WM_T_80003)
   11486 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11487 	else
   11488 		rv = sc->phy.acquire(sc);
   11489 	if (rv != 0) {
   11490 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11491 		    __func__);
   11492 		return rv;
   11493 	}
   11494 
   11495 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11496 
   11497 	if (sc->sc_type == WM_T_80003)
   11498 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11499 	else
   11500 		sc->phy.release(sc);
   11501 
   11502 	return rv;
   11503 }
   11504 
   11505 static int
   11506 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11507 {
   11508 
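         	/*
         	 * Kumeran registers are reached through the single KUMCTRLSTA
         	 * window: write the register offset with REN set to request a
         	 * read, wait briefly, then pull the data from the low 16 bits.
         	 */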
   11509 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11510 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11511 	    KUMCTRLSTA_REN);
   11512 	CSR_WRITE_FLUSH(sc);
   11513 	delay(2);
   11514 
   11515 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11516 
   11517 	return 0;
   11518 }
   11519 
   11520 /*
   11521  * wm_kmrn_writereg:
   11522  *
   11523  *	Write a kumeran register
   11524  */
   11525 static int
   11526 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11527 {
   11528 	int rv;
   11529 
   11530 	if (sc->sc_type == WM_T_80003)
   11531 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11532 	else
   11533 		rv = sc->phy.acquire(sc);
   11534 	if (rv != 0) {
   11535 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11536 		    __func__);
   11537 		return rv;
   11538 	}
   11539 
   11540 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11541 
   11542 	if (sc->sc_type == WM_T_80003)
   11543 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11544 	else
   11545 		sc->phy.release(sc);
   11546 
   11547 	return rv;
   11548 }
   11549 
   11550 static int
   11551 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11552 {
   11553 
   11554 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11555 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11556 
   11557 	return 0;
   11558 }
   11559 
   11560 /*
   11561  * EMI register related (82579 and WMPHY_I217 (PCH2 and newer)).
   11562  * This access method is different from IEEE MMD.
   11563  */
   11564 static int
   11565 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11566 {
   11567 	struct wm_softc *sc = device_private(dev);
   11568 	int rv;
   11569 
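         	/* Indirect access: latch the EMI address first, then move the data */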
   11570 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11571 	if (rv != 0)
   11572 		return rv;
   11573 
   11574 	if (rd)
   11575 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11576 	else
   11577 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11578 	return rv;
   11579 }
   11580 
   11581 static int
   11582 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11583 {
   11584 
   11585 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11586 }
   11587 
   11588 static int
   11589 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11590 {
   11591 
   11592 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11593 }
   11594 
   11595 /* SGMII related */
   11596 
   11597 /*
   11598  * wm_sgmii_uses_mdio
   11599  *
   11600  * Check whether the transaction is to the internal PHY or the external
   11601  * MDIO interface. Return true if it's MDIO.
   11602  */
   11603 static bool
   11604 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11605 {
   11606 	uint32_t reg;
   11607 	bool ismdio = false;
   11608 
   11609 	switch (sc->sc_type) {
   11610 	case WM_T_82575:
   11611 	case WM_T_82576:
   11612 		reg = CSR_READ(sc, WMREG_MDIC);
   11613 		ismdio = ((reg & MDIC_DEST) != 0);
   11614 		break;
   11615 	case WM_T_82580:
   11616 	case WM_T_I350:
   11617 	case WM_T_I354:
   11618 	case WM_T_I210:
   11619 	case WM_T_I211:
   11620 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11621 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11622 		break;
   11623 	default:
   11624 		break;
   11625 	}
   11626 
   11627 	return ismdio;
   11628 }
   11629 
   11630 /*
   11631  * wm_sgmii_readreg:	[mii interface function]
   11632  *
   11633  *	Read a PHY register on the SGMII.
   11634  * This could be handled by the PHY layer if we didn't have to lock the
   11635  * resource ...
   11636  */
   11637 static int
   11638 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11639 {
   11640 	struct wm_softc *sc = device_private(dev);
   11641 	int rv;
   11642 
   11643 	if (sc->phy.acquire(sc)) {
   11644 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11645 		return -1;
   11646 	}
   11647 
   11648 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11649 
   11650 	sc->phy.release(sc);
   11651 	return rv;
   11652 }
   11653 
   11654 static int
   11655 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11656 {
   11657 	struct wm_softc *sc = device_private(dev);
   11658 	uint32_t i2ccmd;
   11659 	int i, rv = 0;
   11660 
   11661 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11662 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11663 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11664 
   11665 	/* Poll the ready bit */
   11666 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11667 		delay(50);
   11668 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11669 		if (i2ccmd & I2CCMD_READY)
   11670 			break;
   11671 	}
   11672 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11673 		device_printf(dev, "I2CCMD Read did not complete\n");
   11674 		rv = ETIMEDOUT;
   11675 	}
   11676 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11677 		device_printf(dev, "I2CCMD Error bit set\n");
   11678 		rv = EIO;
   11679 	}
   11680 
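         	/* The data bytes arrive in I2C (big-endian) order; swap them back */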
   11681 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11682 
   11683 	return rv;
   11684 }
   11685 
   11686 /*
   11687  * wm_sgmii_writereg:	[mii interface function]
   11688  *
   11689  *	Write a PHY register on the SGMII.
   11690  * This could be handled by the PHY layer if we didn't have to lock the
   11691  * resource ...
   11692  */
   11693 static int
   11694 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11695 {
   11696 	struct wm_softc *sc = device_private(dev);
   11697 	int rv;
   11698 
   11699 	if (sc->phy.acquire(sc) != 0) {
   11700 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11701 		return -1;
   11702 	}
   11703 
   11704 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11705 
   11706 	sc->phy.release(sc);
   11707 
   11708 	return rv;
   11709 }
   11710 
   11711 static int
   11712 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11713 {
   11714 	struct wm_softc *sc = device_private(dev);
   11715 	uint32_t i2ccmd;
   11716 	uint16_t swapdata;
   11717 	int rv = 0;
   11718 	int i;
   11719 
   11720 	/* Swap the data bytes for the I2C interface */
   11721 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11722 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11723 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11724 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11725 
   11726 	/* Poll the ready bit */
   11727 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11728 		delay(50);
   11729 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11730 		if (i2ccmd & I2CCMD_READY)
   11731 			break;
   11732 	}
   11733 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11734 		device_printf(dev, "I2CCMD Write did not complete\n");
   11735 		rv = ETIMEDOUT;
   11736 	}
   11737 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11738 		device_printf(dev, "I2CCMD Error bit set\n");
   11739 		rv = EIO;
   11740 	}
   11741 
   11742 	return rv;
   11743 }
   11744 
   11745 /* TBI related */
   11746 
   11747 static bool
   11748 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11749 {
   11750 	bool sig;
   11751 
   11752 	sig = ctrl & CTRL_SWDPIN(1);
   11753 
   11754 	/*
   11755 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11756 	 * detect a signal, 1 if they don't.
   11757 	 */
   11758 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11759 		sig = !sig;
   11760 
   11761 	return sig;
   11762 }
   11763 
   11764 /*
   11765  * wm_tbi_mediainit:
   11766  *
   11767  *	Initialize media for use on 1000BASE-X devices.
   11768  */
   11769 static void
   11770 wm_tbi_mediainit(struct wm_softc *sc)
   11771 {
   11772 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11773 	const char *sep = "";
   11774 
   11775 	if (sc->sc_type < WM_T_82543)
   11776 		sc->sc_tipg = TIPG_WM_DFLT;
   11777 	else
   11778 		sc->sc_tipg = TIPG_LG_DFLT;
   11779 
   11780 	sc->sc_tbi_serdes_anegticks = 5;
   11781 
   11782 	/* Initialize our media structures */
   11783 	sc->sc_mii.mii_ifp = ifp;
   11784 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11785 
   11786 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11787 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11788 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11789 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11790 	else
   11791 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11792 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11793 
   11794 	/*
   11795 	 * SWD Pins:
   11796 	 *
   11797 	 *	0 = Link LED (output)
   11798 	 *	1 = Loss Of Signal (input)
   11799 	 */
   11800 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11801 
   11802 	/* XXX Perhaps this is only for TBI */
   11803 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11804 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11805 
   11806 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11807 		sc->sc_ctrl &= ~CTRL_LRST;
   11808 
   11809 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11810 
   11811 #define	ADD(ss, mm, dd)							\
   11812 do {									\
   11813 	aprint_normal("%s%s", sep, ss);					\
   11814 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11815 	sep = ", ";							\
   11816 } while (/*CONSTCOND*/0)
   11817 
   11818 	aprint_normal_dev(sc->sc_dev, "");
   11819 
   11820 	if (sc->sc_type == WM_T_I354) {
   11821 		uint32_t status;
   11822 
   11823 		status = CSR_READ(sc, WMREG_STATUS);
   11824 		if (((status & STATUS_2P5_SKU) != 0)
   11825 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11826 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11827 		} else
   11828 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11829 	} else if (sc->sc_type == WM_T_82545) {
   11830 		/* Only 82545 is LX (XXX except SFP) */
   11831 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11832 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11833 	} else if (sc->sc_sfptype != 0) {
   11834 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   11835 		switch (sc->sc_sfptype) {
   11836 		default:
   11837 		case SFF_SFP_ETH_FLAGS_1000SX:
   11838 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11839 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11840 			break;
   11841 		case SFF_SFP_ETH_FLAGS_1000LX:
   11842 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11843 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11844 			break;
   11845 		case SFF_SFP_ETH_FLAGS_1000CX:
   11846 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   11847 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   11848 			break;
   11849 		case SFF_SFP_ETH_FLAGS_1000T:
   11850 			ADD("1000baseT", IFM_1000_T, 0);
   11851 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   11852 			break;
   11853 		case SFF_SFP_ETH_FLAGS_100FX:
   11854 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   11855 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   11856 			break;
   11857 		}
   11858 	} else {
   11859 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11860 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11861 	}
   11862 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11863 	aprint_normal("\n");
   11864 
   11865 #undef ADD
   11866 
   11867 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11868 }
   11869 
   11870 /*
   11871  * wm_tbi_mediachange:	[ifmedia interface function]
   11872  *
   11873  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11874  */
   11875 static int
   11876 wm_tbi_mediachange(struct ifnet *ifp)
   11877 {
   11878 	struct wm_softc *sc = ifp->if_softc;
   11879 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11880 	uint32_t status, ctrl;
   11881 	bool signal;
   11882 	int i;
   11883 
   11884 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11885 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11886 		/* XXX need some work for >= 82571 and < 82575 */
   11887 		if (sc->sc_type < WM_T_82575)
   11888 			return 0;
   11889 	}
   11890 
   11891 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11892 	    || (sc->sc_type >= WM_T_82575))
   11893 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11894 
   11895 	sc->sc_ctrl &= ~CTRL_LRST;
   11896 	sc->sc_txcw = TXCW_ANE;
   11897 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11898 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11899 	else if (ife->ifm_media & IFM_FDX)
   11900 		sc->sc_txcw |= TXCW_FD;
   11901 	else
   11902 		sc->sc_txcw |= TXCW_HD;
   11903 
   11904 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11905 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11906 
   11907 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11908 		device_xname(sc->sc_dev), sc->sc_txcw));
   11909 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11910 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11911 	CSR_WRITE_FLUSH(sc);
   11912 	delay(1000);
   11913 
   11914 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11915 	signal = wm_tbi_havesignal(sc, ctrl);
   11916 
   11917 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11918 		signal));
   11919 
   11920 	if (signal) {
   11921 		/* Have signal; wait for the link to come up. */
   11922 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11923 			delay(10000);
   11924 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11925 				break;
   11926 		}
   11927 
   11928 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
   11929 			device_xname(sc->sc_dev), i));
   11930 
   11931 		status = CSR_READ(sc, WMREG_STATUS);
   11932 		DPRINTF(WM_DEBUG_LINK,
   11933 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11934 			device_xname(sc->sc_dev), status, STATUS_LU));
   11935 		if (status & STATUS_LU) {
   11936 			/* Link is up. */
   11937 			DPRINTF(WM_DEBUG_LINK,
   11938 			    ("%s: LINK: set media -> link up %s\n",
   11939 				device_xname(sc->sc_dev),
   11940 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11941 
   11942 			/*
   11943 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
   11944 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   11945 			 */
   11946 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11947 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11948 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11949 			if (status & STATUS_FD)
   11950 				sc->sc_tctl |=
   11951 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11952 			else
   11953 				sc->sc_tctl |=
   11954 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11955 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11956 				sc->sc_fcrtl |= FCRTL_XONE;
   11957 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11958 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11959 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11960 			sc->sc_tbi_linkup = 1;
   11961 		} else {
   11962 			if (i == WM_LINKUP_TIMEOUT)
   11963 				wm_check_for_link(sc);
   11964 			/* Link is down. */
   11965 			DPRINTF(WM_DEBUG_LINK,
   11966 			    ("%s: LINK: set media -> link down\n",
   11967 				device_xname(sc->sc_dev)));
   11968 			sc->sc_tbi_linkup = 0;
   11969 		}
   11970 	} else {
   11971 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11972 			device_xname(sc->sc_dev)));
   11973 		sc->sc_tbi_linkup = 0;
   11974 	}
   11975 
   11976 	wm_tbi_serdes_set_linkled(sc);
   11977 
   11978 	return 0;
   11979 }
   11980 
   11981 /*
   11982  * wm_tbi_mediastatus:	[ifmedia interface function]
   11983  *
   11984  *	Get the current interface media status on a 1000BASE-X device.
   11985  */
   11986 static void
   11987 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11988 {
   11989 	struct wm_softc *sc = ifp->if_softc;
   11990 	uint32_t ctrl, status;
   11991 
   11992 	ifmr->ifm_status = IFM_AVALID;
   11993 	ifmr->ifm_active = IFM_ETHER;
   11994 
   11995 	status = CSR_READ(sc, WMREG_STATUS);
   11996 	if ((status & STATUS_LU) == 0) {
   11997 		ifmr->ifm_active |= IFM_NONE;
   11998 		return;
   11999 	}
   12000 
   12001 	ifmr->ifm_status |= IFM_ACTIVE;
   12002 	/* Only 82545 is LX */
   12003 	if (sc->sc_type == WM_T_82545)
   12004 		ifmr->ifm_active |= IFM_1000_LX;
   12005 	else
   12006 		ifmr->ifm_active |= IFM_1000_SX;
   12007 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12008 		ifmr->ifm_active |= IFM_FDX;
   12009 	else
   12010 		ifmr->ifm_active |= IFM_HDX;
   12011 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12012 	if (ctrl & CTRL_RFCE)
   12013 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12014 	if (ctrl & CTRL_TFCE)
   12015 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12016 }
   12017 
   12018 /* XXX TBI only */
   12019 static int
   12020 wm_check_for_link(struct wm_softc *sc)
   12021 {
   12022 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12023 	uint32_t rxcw;
   12024 	uint32_t ctrl;
   12025 	uint32_t status;
   12026 	bool signal;
   12027 
   12028 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   12029 		device_xname(sc->sc_dev), __func__));
   12030 
   12031 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12032 		/* XXX need some work for >= 82571 */
   12033 		if (sc->sc_type >= WM_T_82571) {
   12034 			sc->sc_tbi_linkup = 1;
   12035 			return 0;
   12036 		}
   12037 	}
   12038 
   12039 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12040 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12041 	status = CSR_READ(sc, WMREG_STATUS);
   12042 	signal = wm_tbi_havesignal(sc, ctrl);
   12043 
   12044 	DPRINTF(WM_DEBUG_LINK,
   12045 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12046 		device_xname(sc->sc_dev), __func__, signal,
   12047 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12048 
   12049 	/*
   12050 	 * SWDPIN   LU RXCW
   12051 	 *	0    0	  0
   12052 	 *	0    0	  1	(should not happen)
   12053 	 *	0    1	  0	(should not happen)
   12054 	 *	0    1	  1	(should not happen)
   12055 	 *	1    0	  0	Disable autonegotiation and force link up
   12056 	 *	1    0	  1	Got /C/ but no link yet
   12057 	 *	1    1	  0	(link up)
   12058 	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
   12060 	 */
   12061 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12062 		DPRINTF(WM_DEBUG_LINK,
   12063 		    ("%s: %s: force linkup and fullduplex\n",
   12064 			device_xname(sc->sc_dev), __func__));
   12065 		sc->sc_tbi_linkup = 0;
   12066 		/* Disable auto-negotiation in the TXCW register */
   12067 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12068 
   12069 		/*
   12070 		 * Force link-up and also force full-duplex.
   12071 		 *
   12072 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
   12073 		 * automatically, so re-read CTRL into sc->sc_ctrl.
   12074 		 */
   12075 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12076 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12077 	} else if (((status & STATUS_LU) != 0)
   12078 	    && ((rxcw & RXCW_C) != 0)
   12079 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12080 		sc->sc_tbi_linkup = 1;
   12081 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12082 			device_xname(sc->sc_dev),
   12083 			__func__));
   12084 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12085 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12086 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12087 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   12088 			device_xname(sc->sc_dev), __func__));
   12089 	} else {
   12090 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12091 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12092 			status));
   12093 	}
   12094 
   12095 	return 0;
   12096 }
   12097 
   12098 /*
   12099  * wm_tbi_tick:
   12100  *
   12101  *	Check the link on TBI devices.
   12102  *	This function acts as mii_tick().
   12103  */
   12104 static void
   12105 wm_tbi_tick(struct wm_softc *sc)
   12106 {
   12107 	struct mii_data *mii = &sc->sc_mii;
   12108 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12109 	uint32_t status;
   12110 
   12111 	KASSERT(WM_CORE_LOCKED(sc));
   12112 
   12113 	status = CSR_READ(sc, WMREG_STATUS);
   12114 
   12115 	/* XXX is this needed? */
   12116 	(void)CSR_READ(sc, WMREG_RXCW);
   12117 	(void)CSR_READ(sc, WMREG_CTRL);
   12118 
   12119 	/* set link status */
   12120 	if ((status & STATUS_LU) == 0) {
   12121 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12122 			device_xname(sc->sc_dev)));
   12123 		sc->sc_tbi_linkup = 0;
   12124 	} else if (sc->sc_tbi_linkup == 0) {
   12125 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12126 			device_xname(sc->sc_dev),
   12127 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12128 		sc->sc_tbi_linkup = 1;
   12129 		sc->sc_tbi_serdes_ticks = 0;
   12130 	}
   12131 
   12132 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12133 		goto setled;
   12134 
   12135 	if ((status & STATUS_LU) == 0) {
   12136 		sc->sc_tbi_linkup = 0;
   12137 		/* If the timer expired, retry autonegotiation */
   12138 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12139 		    && (++sc->sc_tbi_serdes_ticks
   12140 			>= sc->sc_tbi_serdes_anegticks)) {
   12141 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12142 				device_xname(sc->sc_dev), __func__));
   12143 			sc->sc_tbi_serdes_ticks = 0;
   12144 			/*
   12145 			 * Reset the link, and let autonegotiation do
   12146 			 * its thing
   12147 			 */
   12148 			sc->sc_ctrl |= CTRL_LRST;
   12149 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12150 			CSR_WRITE_FLUSH(sc);
   12151 			delay(1000);
   12152 			sc->sc_ctrl &= ~CTRL_LRST;
   12153 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12154 			CSR_WRITE_FLUSH(sc);
   12155 			delay(1000);
   12156 			CSR_WRITE(sc, WMREG_TXCW,
   12157 			    sc->sc_txcw & ~TXCW_ANE);
   12158 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12159 		}
   12160 	}
   12161 
   12162 setled:
   12163 	wm_tbi_serdes_set_linkled(sc);
   12164 }
   12165 
   12166 /* SERDES related */
   12167 static void
   12168 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12169 {
   12170 	uint32_t reg;
   12171 
   12172 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12173 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12174 		return;
   12175 
   12176 	/* Enable PCS to turn on link */
   12177 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12178 	reg |= PCS_CFG_PCS_EN;
   12179 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12180 
   12181 	/* Power up the laser */
   12182 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12183 	reg &= ~CTRL_EXT_SWDPIN(3);
   12184 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12185 
   12186 	/* Flush the posted write so the power-up takes effect immediately */
   12187 	CSR_WRITE_FLUSH(sc);
   12188 }
   12189 
   12190 static int
   12191 wm_serdes_mediachange(struct ifnet *ifp)
   12192 {
   12193 	struct wm_softc *sc = ifp->if_softc;
   12194 	bool pcs_autoneg = true; /* XXX */
   12195 	uint32_t ctrl_ext, pcs_lctl, reg;
   12196 
   12197 	/* XXX Currently, this function is not called on 8257[12] */
   12198 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12199 	    || (sc->sc_type >= WM_T_82575))
   12200 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12201 
   12202 	wm_serdes_power_up_link_82575(sc);
   12203 
   12204 	sc->sc_ctrl |= CTRL_SLU;
   12205 
   12206 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12207 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12208 
   12209 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12210 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12211 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12212 	case CTRL_EXT_LINK_MODE_SGMII:
   12213 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12214 		pcs_autoneg = true;
   12215 		/* Autoneg time out should be disabled for SGMII mode */
   12216 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12217 		break;
   12218 	case CTRL_EXT_LINK_MODE_1000KX:
   12219 		pcs_autoneg = false;
   12220 		/* FALLTHROUGH */
   12221 	default:
   12222 		if ((sc->sc_type == WM_T_82575)
   12223 		    || (sc->sc_type == WM_T_82576)) {
   12224 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12225 				pcs_autoneg = false;
   12226 		}
   12227 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12228 		    | CTRL_FRCFDX;
   12229 
   12230 		/* Set speed of 1000/Full if speed/duplex is forced */
   12231 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12232 	}
   12233 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12234 
   12235 	if (pcs_autoneg) {
   12236 		/* Set PCS register for autoneg */
   12237 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12238 
   12239 		/* Disable force flow control for autoneg */
   12240 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12241 
   12242 		/* Configure flow control advertisement for autoneg */
   12243 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12244 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12245 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12246 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12247 	} else
   12248 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12249 
   12250 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12251 
   12253 	return 0;
   12254 }
   12255 
   12256 static void
   12257 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12258 {
   12259 	struct wm_softc *sc = ifp->if_softc;
   12260 	struct mii_data *mii = &sc->sc_mii;
   12261 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12262 	uint32_t pcs_adv, pcs_lpab, reg;
   12263 
   12264 	ifmr->ifm_status = IFM_AVALID;
   12265 	ifmr->ifm_active = IFM_ETHER;
   12266 
   12267 	/* Check PCS */
   12268 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12269 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12270 		ifmr->ifm_active |= IFM_NONE;
   12271 		sc->sc_tbi_linkup = 0;
   12272 		goto setled;
   12273 	}
   12274 
   12275 	sc->sc_tbi_linkup = 1;
   12276 	ifmr->ifm_status |= IFM_ACTIVE;
   12277 	if (sc->sc_type == WM_T_I354) {
   12278 		uint32_t status;
   12279 
   12280 		status = CSR_READ(sc, WMREG_STATUS);
   12281 		if (((status & STATUS_2P5_SKU) != 0)
   12282 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12283 			ifmr->ifm_active |= IFM_2500_KX;
   12284 		} else
   12285 			ifmr->ifm_active |= IFM_1000_KX;
   12286 	} else {
   12287 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12288 		case PCS_LSTS_SPEED_10:
   12289 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12290 			break;
   12291 		case PCS_LSTS_SPEED_100:
   12292 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12293 			break;
   12294 		case PCS_LSTS_SPEED_1000:
   12295 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12296 			break;
   12297 		default:
   12298 			device_printf(sc->sc_dev, "Unknown speed\n");
   12299 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12300 			break;
   12301 		}
   12302 	}
   12303 	if ((reg & PCS_LSTS_FDX) != 0)
   12304 		ifmr->ifm_active |= IFM_FDX;
   12305 	else
   12306 		ifmr->ifm_active |= IFM_HDX;
   12307 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12308 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12309 		/* Check flow */
   12310 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12311 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12312 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12313 			goto setled;
   12314 		}
   12315 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12316 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12317 		DPRINTF(WM_DEBUG_LINK,
   12318 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
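         		/*
         		 * Resolve pause per IEEE 802.3 Annex 28B: symmetric
         		 * pause on both sides enables flow control in both
         		 * directions; otherwise an asymmetric-pause pairing
         		 * enables only the direction both partners agree on.
         		 */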
   12319 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12320 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12321 			mii->mii_media_active |= IFM_FLOW
   12322 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12323 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12324 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12325 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12326 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12327 			mii->mii_media_active |= IFM_FLOW
   12328 			    | IFM_ETH_TXPAUSE;
   12329 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12330 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12331 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12332 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12333 			mii->mii_media_active |= IFM_FLOW
   12334 			    | IFM_ETH_RXPAUSE;
   12335 		}
   12336 	}
   12337 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12338 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12339 setled:
   12340 	wm_tbi_serdes_set_linkled(sc);
   12341 }
   12342 
   12343 /*
   12344  * wm_serdes_tick:
   12345  *
   12346  *	Check the link on serdes devices.
   12347  */
   12348 static void
   12349 wm_serdes_tick(struct wm_softc *sc)
   12350 {
   12351 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12352 	struct mii_data *mii = &sc->sc_mii;
   12353 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12354 	uint32_t reg;
   12355 
   12356 	KASSERT(WM_CORE_LOCKED(sc));
   12357 
   12358 	mii->mii_media_status = IFM_AVALID;
   12359 	mii->mii_media_active = IFM_ETHER;
   12360 
   12361 	/* Check PCS */
   12362 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12363 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12364 		mii->mii_media_status |= IFM_ACTIVE;
   12365 		sc->sc_tbi_linkup = 1;
   12366 		sc->sc_tbi_serdes_ticks = 0;
   12367 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12368 		if ((reg & PCS_LSTS_FDX) != 0)
   12369 			mii->mii_media_active |= IFM_FDX;
   12370 		else
   12371 			mii->mii_media_active |= IFM_HDX;
   12372 	} else {
   12373 		mii->mii_media_status |= IFM_NONE;
   12374 		sc->sc_tbi_linkup = 0;
   12375 		/* If the timer expired, retry autonegotiation */
   12376 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12377 		    && (++sc->sc_tbi_serdes_ticks
   12378 			>= sc->sc_tbi_serdes_anegticks)) {
   12379 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12380 				device_xname(sc->sc_dev), __func__));
   12381 			sc->sc_tbi_serdes_ticks = 0;
   12382 			/* XXX */
   12383 			wm_serdes_mediachange(ifp);
   12384 		}
   12385 	}
   12386 
   12387 	wm_tbi_serdes_set_linkled(sc);
   12388 }
   12389 
   12390 /* SFP related */
   12391 
   12392 static int
   12393 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12394 {
   12395 	uint32_t i2ccmd;
   12396 	int i;
   12397 
   12398 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12399 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12400 
   12401 	/* Poll the ready bit */
   12402 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12403 		delay(50);
   12404 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12405 		if (i2ccmd & I2CCMD_READY)
   12406 			break;
   12407 	}
   12408 	if ((i2ccmd & I2CCMD_READY) == 0)
   12409 		return -1;
   12410 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12411 		return -1;
   12412 
   12413 	*data = i2ccmd & 0x00ff;
   12414 
   12415 	return 0;
   12416 }
   12417 
   12418 static uint32_t
   12419 wm_sfp_get_media_type(struct wm_softc *sc)
   12420 {
   12421 	uint32_t ctrl_ext;
   12422 	uint8_t val = 0;
   12423 	int timeout = 3;
   12424 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12425 	int rv = -1;
   12426 
   12427 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12428 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12429 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12430 	CSR_WRITE_FLUSH(sc);
   12431 
   12432 	/* Read SFP module data */
   12433 	while (timeout) {
   12434 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12435 		if (rv == 0)
   12436 			break;
   12437 		delay(100*1000); /* XXX too big */
   12438 		timeout--;
   12439 	}
   12440 	if (rv != 0)
   12441 		goto out;
   12442 
   12443 	switch (val) {
   12444 	case SFF_SFP_ID_SFF:
   12445 		aprint_normal_dev(sc->sc_dev,
   12446 		    "Module/Connector soldered to board\n");
   12447 		break;
   12448 	case SFF_SFP_ID_SFP:
   12449 		sc->sc_flags |= WM_F_SFP;
   12450 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12451 		break;
   12452 	case SFF_SFP_ID_UNKNOWN:
   12453 		goto out;
   12454 	default:
   12455 		break;
   12456 	}
   12457 
   12458 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12459 	if (rv != 0)
   12460 		goto out;
   12461 
   12462 	sc->sc_sfptype = val;
   12463 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12464 		mediatype = WM_MEDIATYPE_SERDES;
   12465 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12466 		sc->sc_flags |= WM_F_SGMII;
   12467 		mediatype = WM_MEDIATYPE_COPPER;
   12468 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12469 		sc->sc_flags |= WM_F_SGMII;
   12470 		mediatype = WM_MEDIATYPE_SERDES;
   12471 	} else {
   12472 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12473 		    __func__, sc->sc_sfptype);
   12474 		sc->sc_sfptype = 0; /* XXX unknown */
   12475 	}
   12476 
   12477 out:
   12478 	/* Restore I2C interface setting */
   12479 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12480 
   12481 	return mediatype;
   12482 }
   12483 
   12484 /*
   12485  * NVM related.
   12486  * Microwire, SPI (w/wo EERD) and Flash.
   12487  */
   12488 
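         /*
          * A minimal usage sketch (names as used elsewhere in this file):
          * callers go through the wm_nvm_read() front end, which dispatches
          * to one of the bus-specific readers below, e.g.
          *
          *	uint16_t word;
          *
          *	if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &word) != 0)
          *		aprint_error_dev(sc->sc_dev, "NVM read failed\n");
          */
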
   12489 /* Both spi and uwire */
   12490 
   12491 /*
   12492  * wm_eeprom_sendbits:
   12493  *
   12494  *	Send a series of bits to the EEPROM.
   12495  */
   12496 static void
   12497 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12498 {
   12499 	uint32_t reg;
   12500 	int x;
   12501 
   12502 	reg = CSR_READ(sc, WMREG_EECD);
   12503 
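         	/* MSB first: present each bit on DI, then pulse SK high and low */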
   12504 	for (x = nbits; x > 0; x--) {
   12505 		if (bits & (1U << (x - 1)))
   12506 			reg |= EECD_DI;
   12507 		else
   12508 			reg &= ~EECD_DI;
   12509 		CSR_WRITE(sc, WMREG_EECD, reg);
   12510 		CSR_WRITE_FLUSH(sc);
   12511 		delay(2);
   12512 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12513 		CSR_WRITE_FLUSH(sc);
   12514 		delay(2);
   12515 		CSR_WRITE(sc, WMREG_EECD, reg);
   12516 		CSR_WRITE_FLUSH(sc);
   12517 		delay(2);
   12518 	}
   12519 }
   12520 
   12521 /*
   12522  * wm_eeprom_recvbits:
   12523  *
   12524  *	Receive a series of bits from the EEPROM.
   12525  */
   12526 static void
   12527 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12528 {
   12529 	uint32_t reg, val;
   12530 	int x;
   12531 
   12532 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12533 
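         	/* Clock SK and sample each bit on DO, assembling MSB first */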
   12534 	val = 0;
   12535 	for (x = nbits; x > 0; x--) {
   12536 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12537 		CSR_WRITE_FLUSH(sc);
   12538 		delay(2);
   12539 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12540 			val |= (1U << (x - 1));
   12541 		CSR_WRITE(sc, WMREG_EECD, reg);
   12542 		CSR_WRITE_FLUSH(sc);
   12543 		delay(2);
   12544 	}
   12545 	*valp = val;
   12546 }
   12547 
   12548 /* Microwire */
   12549 
   12550 /*
   12551  * wm_nvm_read_uwire:
   12552  *
   12553  *	Read a word from the EEPROM using the MicroWire protocol.
   12554  */
   12555 static int
   12556 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12557 {
   12558 	uint32_t reg, val;
   12559 	int i;
   12560 
   12561 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12562 		device_xname(sc->sc_dev), __func__));
   12563 
   12564 	if (sc->nvm.acquire(sc) != 0)
   12565 		return -1;
   12566 
   12567 	for (i = 0; i < wordcnt; i++) {
   12568 		/* Clear SK and DI. */
   12569 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12570 		CSR_WRITE(sc, WMREG_EECD, reg);
   12571 
   12572 		/*
   12573 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12574 		 * and Xen.
   12575 		 *
   12576 		 * We use this workaround only for the 82540 because qemu's
   12577 		 * e1000 acts as an 82540.
   12578 		 */
   12579 		if (sc->sc_type == WM_T_82540) {
   12580 			reg |= EECD_SK;
   12581 			CSR_WRITE(sc, WMREG_EECD, reg);
   12582 			reg &= ~EECD_SK;
   12583 			CSR_WRITE(sc, WMREG_EECD, reg);
   12584 			CSR_WRITE_FLUSH(sc);
   12585 			delay(2);
   12586 		}
   12587 		/* XXX: end of workaround */
   12588 
   12589 		/* Set CHIP SELECT. */
   12590 		reg |= EECD_CS;
   12591 		CSR_WRITE(sc, WMREG_EECD, reg);
   12592 		CSR_WRITE_FLUSH(sc);
   12593 		delay(2);
   12594 
   12595 		/* Shift in the READ command. */
   12596 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12597 
   12598 		/* Shift in address. */
   12599 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12600 
   12601 		/* Shift out the data. */
   12602 		wm_eeprom_recvbits(sc, &val, 16);
   12603 		data[i] = val & 0xffff;
   12604 
   12605 		/* Clear CHIP SELECT. */
   12606 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12607 		CSR_WRITE(sc, WMREG_EECD, reg);
   12608 		CSR_WRITE_FLUSH(sc);
   12609 		delay(2);
   12610 	}
   12611 
   12612 	sc->nvm.release(sc);
   12613 	return 0;
   12614 }
   12615 
   12616 /* SPI */
   12617 
   12618 /*
   12619  * Set SPI and FLASH related information from the EECD register.
   12620  * For 82541 and 82547, the word size is taken from EEPROM.
   12621  */
   12622 static int
   12623 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12624 {
   12625 	int size;
   12626 	uint32_t reg;
   12627 	uint16_t data;
   12628 
   12629 	reg = CSR_READ(sc, WMREG_EECD);
   12630 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12631 
   12632 	/* Read the size of NVM from EECD by default */
   12633 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12634 	switch (sc->sc_type) {
   12635 	case WM_T_82541:
   12636 	case WM_T_82541_2:
   12637 	case WM_T_82547:
   12638 	case WM_T_82547_2:
   12639 		/* Set dummy value to access EEPROM */
   12640 		sc->sc_nvm_wordsize = 64;
   12641 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12642 			aprint_error_dev(sc->sc_dev,
   12643 			    "%s: failed to read EEPROM size\n", __func__);
   12644 		}
   12645 		reg = data;
   12646 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12647 		if (size == 0)
   12648 			size = 6; /* 64 words */
   12649 		else
   12650 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12651 		break;
   12652 	case WM_T_80003:
   12653 	case WM_T_82571:
   12654 	case WM_T_82572:
   12655 	case WM_T_82573: /* SPI case */
   12656 	case WM_T_82574: /* SPI case */
   12657 	case WM_T_82583: /* SPI case */
   12658 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12659 		if (size > 14)
   12660 			size = 14;
   12661 		break;
   12662 	case WM_T_82575:
   12663 	case WM_T_82576:
   12664 	case WM_T_82580:
   12665 	case WM_T_I350:
   12666 	case WM_T_I354:
   12667 	case WM_T_I210:
   12668 	case WM_T_I211:
   12669 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12670 		if (size > 15)
   12671 			size = 15;
   12672 		break;
   12673 	default:
   12674 		aprint_error_dev(sc->sc_dev,
   12675 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12676 		return -1;
   12678 	}
   12679 
   12680 	sc->sc_nvm_wordsize = 1 << size;
   12681 
   12682 	return 0;
   12683 }
   12684 
   12685 /*
   12686  * wm_nvm_ready_spi:
   12687  *
   12688  *	Wait for a SPI EEPROM to be ready for commands.
   12689  */
   12690 static int
   12691 wm_nvm_ready_spi(struct wm_softc *sc)
   12692 {
   12693 	uint32_t val;
   12694 	int usec;
   12695 
   12696 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12697 		device_xname(sc->sc_dev), __func__));
   12698 
   12699 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12700 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12701 		wm_eeprom_recvbits(sc, &val, 8);
   12702 		if ((val & SPI_SR_RDY) == 0)
   12703 			break;
   12704 	}
   12705 	if (usec >= SPI_MAX_RETRIES) {
   12706 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12707 		return -1;
   12708 	}
   12709 	return 0;
   12710 }
   12711 
   12712 /*
   12713  * wm_nvm_read_spi:
   12714  *
   12715  *	Read a word from the EEPROM using the SPI protocol.
   12716  */
   12717 static int
   12718 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12719 {
   12720 	uint32_t reg, val;
   12721 	int i;
   12722 	uint8_t opc;
   12723 	int rv = 0;
   12724 
   12725 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12726 		device_xname(sc->sc_dev), __func__));
   12727 
   12728 	if (sc->nvm.acquire(sc) != 0)
   12729 		return -1;
   12730 
   12731 	/* Clear SK and CS. */
   12732 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12733 	CSR_WRITE(sc, WMREG_EECD, reg);
   12734 	CSR_WRITE_FLUSH(sc);
   12735 	delay(2);
   12736 
   12737 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12738 		goto out;
   12739 
   12740 	/* Toggle CS to flush commands. */
   12741 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12742 	CSR_WRITE_FLUSH(sc);
   12743 	delay(2);
   12744 	CSR_WRITE(sc, WMREG_EECD, reg);
   12745 	CSR_WRITE_FLUSH(sc);
   12746 	delay(2);
   12747 
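         	/*
         	 * Build the READ opcode; parts with 8 address bits carry the
         	 * ninth address bit (for words >= 128) in the A8 opcode bit.
         	 * The address sent below is a byte address, hence word << 1.
         	 */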
   12748 	opc = SPI_OPC_READ;
   12749 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12750 		opc |= SPI_OPC_A8;
   12751 
   12752 	wm_eeprom_sendbits(sc, opc, 8);
   12753 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12754 
   12755 	for (i = 0; i < wordcnt; i++) {
   12756 		wm_eeprom_recvbits(sc, &val, 16);
   12757 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12758 	}
   12759 
   12760 	/* Raise CS and clear SK. */
   12761 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12762 	CSR_WRITE(sc, WMREG_EECD, reg);
   12763 	CSR_WRITE_FLUSH(sc);
   12764 	delay(2);
   12765 
   12766 out:
   12767 	sc->nvm.release(sc);
   12768 	return rv;
   12769 }
   12770 
   12771 /* Using with EERD */
   12772 
   12773 static int
   12774 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12775 {
   12776 	uint32_t attempts = 100000;
   12777 	uint32_t i, reg = 0;
   12778 	int32_t done = -1;
   12779 
   12780 	for (i = 0; i < attempts; i++) {
   12781 		reg = CSR_READ(sc, rw);
   12782 
   12783 		if (reg & EERD_DONE) {
   12784 			done = 0;
   12785 			break;
   12786 		}
   12787 		delay(5);
   12788 	}
   12789 
   12790 	return done;
   12791 }
   12792 
   12793 static int
   12794 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12795 {
   12796 	int i, eerd = 0;
   12797 	int rv = 0;
   12798 
   12799 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12800 		device_xname(sc->sc_dev), __func__));
   12801 
   12802 	if (sc->nvm.acquire(sc) != 0)
   12803 		return -1;
   12804 
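         	/* One EERD transaction per word: start it, poll DONE, fetch data */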
   12805 	for (i = 0; i < wordcnt; i++) {
   12806 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12807 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12808 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12809 		if (rv != 0) {
   12810 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   12811 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12812 			break;
   12813 		}
   12814 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12815 	}
   12816 
   12817 	sc->nvm.release(sc);
   12818 	return rv;
   12819 }
   12820 
   12821 /* Flash */
   12822 
   12823 static int
   12824 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12825 {
   12826 	uint32_t eecd;
   12827 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12828 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12829 	uint32_t nvm_dword = 0;
   12830 	uint8_t sig_byte = 0;
   12831 	int rv;
   12832 
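         	/*
         	 * Each flash bank stores its valid-bank signature in the high
         	 * byte of word ICH_NVM_SIG_WORD; bank 1 starts one flash-bank
         	 * size past bank 0, hence the defaults computed above.
         	 */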
   12833 	switch (sc->sc_type) {
   12834 	case WM_T_PCH_SPT:
   12835 	case WM_T_PCH_CNP:
   12836 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12837 		act_offset = ICH_NVM_SIG_WORD * 2;
   12838 
   12839 		/* Set bank to 0 in case flash read fails. */
   12840 		*bank = 0;
   12841 
   12842 		/* Check bank 0 */
   12843 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12844 		if (rv != 0)
   12845 			return rv;
   12846 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12847 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12848 			*bank = 0;
   12849 			return 0;
   12850 		}
   12851 
   12852 		/* Check bank 1 */
   12853 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   12854 		    &nvm_dword);
         		if (rv != 0)
         			return rv;
   12855 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12856 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12857 			*bank = 1;
   12858 			return 0;
   12859 		}
   12860 		aprint_error_dev(sc->sc_dev,
   12861 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12862 		return -1;
   12863 	case WM_T_ICH8:
   12864 	case WM_T_ICH9:
   12865 		eecd = CSR_READ(sc, WMREG_EECD);
   12866 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12867 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12868 			return 0;
   12869 		}
   12870 		/* FALLTHROUGH */
   12871 	default:
   12872 		/* Default to 0 */
   12873 		*bank = 0;
   12874 
   12875 		/* Check bank 0 */
   12876 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12877 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12878 			*bank = 0;
   12879 			return 0;
   12880 		}
   12881 
   12882 		/* Check bank 1 */
   12883 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12884 		    &sig_byte);
   12885 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12886 			*bank = 1;
   12887 			return 0;
   12888 		}
   12889 	}
   12890 
   12891 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12892 		device_xname(sc->sc_dev)));
   12893 	return -1;
   12894 }
   12895 
   12896 /******************************************************************************
   12897  * This function does initial flash setup so that a new read/write/erase cycle
   12898  * can be started.
   12899  *
    12900  * sc - The pointer to the softc structure
   12901  ****************************************************************************/
   12902 static int32_t
   12903 wm_ich8_cycle_init(struct wm_softc *sc)
   12904 {
   12905 	uint16_t hsfsts;
   12906 	int32_t error = 1;
   12907 	int32_t i     = 0;
   12908 
   12909 	if (sc->sc_type >= WM_T_PCH_SPT)
   12910 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12911 	else
   12912 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12913 
    12914 	/* Check the Flash Descriptor Valid bit in HW status */
   12915 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12916 		return error;
   12917 
    12918 	/* Clear FCERR and DAEL in HW status by writing 1 to them */
    12920 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12921 
   12922 	if (sc->sc_type >= WM_T_PCH_SPT)
   12923 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12924 	else
   12925 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12926 
    12927 	/*
    12928 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12929 	 * check against in order to start a new cycle, or the FDONE bit
    12930 	 * should be set by hardware after reset so that it can be used to
    12931 	 * tell whether a cycle is in progress or has completed.  We should
    12932 	 * also have a software semaphore mechanism guarding FDONE or the
    12933 	 * cycle-in-progress bit, so that two threads cannot start a cycle
    12934 	 * at the same time.
    12935 	 */
   12937 
   12938 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12939 		/*
   12940 		 * There is no cycle running at present, so we can start a
   12941 		 * cycle
   12942 		 */
   12943 
   12944 		/* Begin by setting Flash Cycle Done. */
   12945 		hsfsts |= HSFSTS_DONE;
   12946 		if (sc->sc_type >= WM_T_PCH_SPT)
   12947 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12948 			    hsfsts & 0xffffUL);
   12949 		else
   12950 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12951 		error = 0;
   12952 	} else {
   12953 		/*
    12954 		 * Otherwise poll for some time so the current cycle has a
   12955 		 * chance to end before giving up.
   12956 		 */
   12957 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12958 			if (sc->sc_type >= WM_T_PCH_SPT)
   12959 				hsfsts = ICH8_FLASH_READ32(sc,
   12960 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12961 			else
   12962 				hsfsts = ICH8_FLASH_READ16(sc,
   12963 				    ICH_FLASH_HSFSTS);
   12964 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12965 				error = 0;
   12966 				break;
   12967 			}
   12968 			delay(1);
   12969 		}
   12970 		if (error == 0) {
    12971 			/*
    12972 			 * The previous cycle ended within the timeout; now
    12973 			 * set the Flash Cycle Done.
    12974 			 */
   12975 			hsfsts |= HSFSTS_DONE;
   12976 			if (sc->sc_type >= WM_T_PCH_SPT)
   12977 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12978 				    hsfsts & 0xffffUL);
   12979 			else
   12980 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12981 				    hsfsts);
   12982 		}
   12983 	}
   12984 	return error;
   12985 }
   12986 
   12987 /******************************************************************************
   12988  * This function starts a flash cycle and waits for its completion
   12989  *
    12990  * sc - The pointer to the softc structure
   12991  ****************************************************************************/
   12992 static int32_t
   12993 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12994 {
   12995 	uint16_t hsflctl;
   12996 	uint16_t hsfsts;
   12997 	int32_t error = 1;
   12998 	uint32_t i = 0;
   12999 
   13000 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13001 	if (sc->sc_type >= WM_T_PCH_SPT)
   13002 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13003 	else
   13004 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13005 	hsflctl |= HSFCTL_GO;
   13006 	if (sc->sc_type >= WM_T_PCH_SPT)
   13007 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13008 		    (uint32_t)hsflctl << 16);
   13009 	else
   13010 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13011 
    13012 	/* Wait until the FDONE bit is set */
   13013 	do {
   13014 		if (sc->sc_type >= WM_T_PCH_SPT)
   13015 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13016 			    & 0xffffUL;
   13017 		else
   13018 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13019 		if (hsfsts & HSFSTS_DONE)
   13020 			break;
   13021 		delay(1);
   13022 		i++;
   13023 	} while (i < timeout);
    13024 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13025 		error = 0;
   13026 
   13027 	return error;
   13028 }
   13029 
   13030 /******************************************************************************
   13031  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13032  *
    13033  * sc - The pointer to the softc structure
    13034  * index - The index of the byte or (d)word to read.
    13035  * size - Size of data to read: 1=byte, 2=word, 4=dword
    13036  * data - Pointer to the (d)word to store the value read.
   13037  *****************************************************************************/
   13038 static int32_t
   13039 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13040     uint32_t size, uint32_t *data)
   13041 {
   13042 	uint16_t hsfsts;
   13043 	uint16_t hsflctl;
   13044 	uint32_t flash_linear_address;
   13045 	uint32_t flash_data = 0;
   13046 	int32_t error = 1;
   13047 	int32_t count = 0;
   13048 
    13049 	if (size < 1 || size > 4 || data == NULL ||
   13050 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13051 		return error;
   13052 
   13053 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13054 	    sc->sc_ich8_flash_base;
   13055 
   13056 	do {
   13057 		delay(1);
   13058 		/* Steps */
   13059 		error = wm_ich8_cycle_init(sc);
   13060 		if (error)
   13061 			break;
   13062 
   13063 		if (sc->sc_type >= WM_T_PCH_SPT)
   13064 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13065 			    >> 16;
   13066 		else
   13067 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    13068 		/* The BCOUNT field holds size - 1: 0=1, 1=2, 3=4 bytes. */
   13069 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   13070 		    & HSFCTL_BCOUNT_MASK;
   13071 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13072 		if (sc->sc_type >= WM_T_PCH_SPT) {
    13073 			/*
    13074 			 * On SPT, this register is in LAN memory space, not
    13075 			 * flash.  Therefore, only 32 bit access is supported.
    13076 			 */
   13077 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13078 			    (uint32_t)hsflctl << 16);
   13079 		} else
   13080 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13081 
   13082 		/*
   13083 		 * Write the last 24 bits of index into Flash Linear address
   13084 		 * field in Flash Address
   13085 		 */
    13086 		/* TODO: check the index against the size of the flash */
   13087 
   13088 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13089 
   13090 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13091 
    13092 		/*
    13093 		 * If FCERR is set, clear it and retry the whole sequence a
    13094 		 * few more times; otherwise read the data out of the Flash
    13095 		 * Data0 register, least significant byte first.
    13096 		 */
   13098 		if (error == 0) {
   13099 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13100 			if (size == 1)
   13101 				*data = (uint8_t)(flash_data & 0x000000FF);
   13102 			else if (size == 2)
   13103 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13104 			else if (size == 4)
   13105 				*data = (uint32_t)flash_data;
   13106 			break;
   13107 		} else {
   13108 			/*
   13109 			 * If we've gotten here, then things are probably
   13110 			 * completely hosed, but if the error condition is
   13111 			 * detected, it won't hurt to give it another try...
   13112 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13113 			 */
   13114 			if (sc->sc_type >= WM_T_PCH_SPT)
   13115 				hsfsts = ICH8_FLASH_READ32(sc,
   13116 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13117 			else
   13118 				hsfsts = ICH8_FLASH_READ16(sc,
   13119 				    ICH_FLASH_HSFSTS);
   13120 
   13121 			if (hsfsts & HSFSTS_ERR) {
   13122 				/* Repeat for some time before giving up. */
   13123 				continue;
   13124 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13125 				break;
   13126 		}
   13127 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13128 
   13129 	return error;
   13130 }
   13131 
   13132 /******************************************************************************
   13133  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13134  *
    13135  * sc - pointer to the wm_softc structure
   13136  * index - The index of the byte to read.
   13137  * data - Pointer to a byte to store the value read.
   13138  *****************************************************************************/
   13139 static int32_t
   13140 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13141 {
   13142 	int32_t status;
   13143 	uint32_t word = 0;
   13144 
   13145 	status = wm_read_ich8_data(sc, index, 1, &word);
   13146 	if (status == 0)
   13147 		*data = (uint8_t)word;
   13148 	else
   13149 		*data = 0;
   13150 
   13151 	return status;
   13152 }
   13153 
   13154 /******************************************************************************
   13155  * Reads a word from the NVM using the ICH8 flash access registers.
   13156  *
    13157  * sc - pointer to the wm_softc structure
   13158  * index - The starting byte index of the word to read.
   13159  * data - Pointer to a word to store the value read.
   13160  *****************************************************************************/
   13161 static int32_t
   13162 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13163 {
   13164 	int32_t status;
   13165 	uint32_t word = 0;
   13166 
   13167 	status = wm_read_ich8_data(sc, index, 2, &word);
   13168 	if (status == 0)
   13169 		*data = (uint16_t)word;
   13170 	else
   13171 		*data = 0;
   13172 
   13173 	return status;
   13174 }
   13175 
   13176 /******************************************************************************
   13177  * Reads a dword from the NVM using the ICH8 flash access registers.
   13178  *
    13179  * sc - pointer to the wm_softc structure
    13180  * index - The starting byte index of the dword to read.
    13181  * data - Pointer to the dword to store the value read.
   13182  *****************************************************************************/
   13183 static int32_t
   13184 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13185 {
   13186 	int32_t status;
   13187 
   13188 	status = wm_read_ich8_data(sc, index, 4, data);
   13189 	return status;
   13190 }
   13191 
   13192 /******************************************************************************
   13193  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13194  * register.
   13195  *
   13196  * sc - Struct containing variables accessed by shared code
   13197  * offset - offset of word in the EEPROM to read
   13198  * data - word read from the EEPROM
   13199  * words - number of words to read
   13200  *****************************************************************************/
   13201 static int
   13202 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13203 {
   13204 	int32_t	 rv = 0;
   13205 	uint32_t flash_bank = 0;
   13206 	uint32_t act_offset = 0;
   13207 	uint32_t bank_offset = 0;
   13208 	uint16_t word = 0;
   13209 	uint16_t i = 0;
   13210 
   13211 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13212 		device_xname(sc->sc_dev), __func__));
   13213 
   13214 	if (sc->nvm.acquire(sc) != 0)
   13215 		return -1;
   13216 
   13217 	/*
   13218 	 * We need to know which is the valid flash bank.  In the event
   13219 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13220 	 * managing flash_bank. So it cannot be trusted and needs
   13221 	 * to be updated with each read.
   13222 	 */
   13223 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13224 	if (rv) {
   13225 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13226 			device_xname(sc->sc_dev)));
   13227 		flash_bank = 0;
   13228 	}
   13229 
   13230 	/*
   13231 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13232 	 * size
   13233 	 */
   13234 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13235 
   13236 	for (i = 0; i < words; i++) {
   13237 		/* The NVM part needs a byte offset, hence * 2 */
   13238 		act_offset = bank_offset + ((offset + i) * 2);
   13239 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13240 		if (rv) {
   13241 			aprint_error_dev(sc->sc_dev,
   13242 			    "%s: failed to read NVM\n", __func__);
   13243 			break;
   13244 		}
   13245 		data[i] = word;
   13246 	}
   13247 
   13248 	sc->nvm.release(sc);
   13249 	return rv;
   13250 }
   13251 
   13252 /******************************************************************************
   13253  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13254  * register.
   13255  *
   13256  * sc - Struct containing variables accessed by shared code
   13257  * offset - offset of word in the EEPROM to read
   13258  * data - word read from the EEPROM
   13259  * words - number of words to read
   13260  *****************************************************************************/
   13261 static int
   13262 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13263 {
   13264 	int32_t	 rv = 0;
   13265 	uint32_t flash_bank = 0;
   13266 	uint32_t act_offset = 0;
   13267 	uint32_t bank_offset = 0;
   13268 	uint32_t dword = 0;
   13269 	uint16_t i = 0;
   13270 
   13271 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13272 		device_xname(sc->sc_dev), __func__));
   13273 
   13274 	if (sc->nvm.acquire(sc) != 0)
   13275 		return -1;
   13276 
   13277 	/*
   13278 	 * We need to know which is the valid flash bank.  In the event
   13279 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13280 	 * managing flash_bank. So it cannot be trusted and needs
   13281 	 * to be updated with each read.
   13282 	 */
   13283 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13284 	if (rv) {
   13285 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13286 			device_xname(sc->sc_dev)));
   13287 		flash_bank = 0;
   13288 	}
   13289 
   13290 	/*
   13291 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13292 	 * size
   13293 	 */
   13294 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13295 
   13296 	for (i = 0; i < words; i++) {
   13297 		/* The NVM part needs a byte offset, hence * 2 */
   13298 		act_offset = bank_offset + ((offset + i) * 2);
   13299 		/* but we must read dword aligned, so mask ... */
   13300 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13301 		if (rv) {
   13302 			aprint_error_dev(sc->sc_dev,
   13303 			    "%s: failed to read NVM\n", __func__);
   13304 			break;
   13305 		}
   13306 		/* ... and pick out low or high word */
   13307 		if ((act_offset & 0x2) == 0)
   13308 			data[i] = (uint16_t)(dword & 0xFFFF);
   13309 		else
   13310 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13311 	}
   13312 
   13313 	sc->nvm.release(sc);
   13314 	return rv;
   13315 }
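
          /*
           * Worked example for the alignment logic above: reading NVM word 5
           * from bank 0 gives act_offset = 10.  The dword is fetched from
           * byte offset 8 (10 & ~0x3), and since (10 & 0x2) != 0 the word of
           * interest is the upper half of that dword.
           */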
   13316 
   13317 /* iNVM */
   13318 
   13319 static int
   13320 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13321 {
    13322 	int32_t	 rv = -1;
   13323 	uint32_t invm_dword;
   13324 	uint16_t i;
   13325 	uint8_t record_type, word_address;
   13326 
   13327 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13328 		device_xname(sc->sc_dev), __func__));
   13329 
   13330 	for (i = 0; i < INVM_SIZE; i++) {
   13331 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13332 		/* Get record type */
   13333 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13334 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13335 			break;
   13336 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13337 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13338 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13339 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13340 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13341 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13342 			if (word_address == address) {
   13343 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13344 				rv = 0;
   13345 				break;
   13346 			}
   13347 		}
   13348 	}
   13349 
   13350 	return rv;
   13351 }
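
          /*
           * Each iNVM dword is a self-describing record.  A word-autoload
           * record carries both the word address and the 16-bit data, which
           * INVM_DWORD_TO_WORD_ADDRESS() and INVM_DWORD_TO_WORD_DATA() above
           * extract; structure records (CSR autoload, RSA key) are skipped
           * by advancing the index past their payload.
           */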
   13352 
   13353 static int
   13354 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13355 {
   13356 	int rv = 0;
   13357 	int i;
   13358 
   13359 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13360 		device_xname(sc->sc_dev), __func__));
   13361 
   13362 	if (sc->nvm.acquire(sc) != 0)
   13363 		return -1;
   13364 
   13365 	for (i = 0; i < words; i++) {
   13366 		switch (offset + i) {
   13367 		case NVM_OFF_MACADDR:
   13368 		case NVM_OFF_MACADDR1:
   13369 		case NVM_OFF_MACADDR2:
   13370 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13371 			if (rv != 0) {
   13372 				data[i] = 0xffff;
   13373 				rv = -1;
   13374 			}
   13375 			break;
    13376 		case NVM_OFF_CFG2:
    13377 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    13378 			if (rv != 0) {
    13379 				data[i] = NVM_INIT_CTRL_2_DEFAULT_I211;
    13380 				rv = 0;
    13381 			}
    13382 			break;
    13383 		case NVM_OFF_CFG4:
    13384 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    13385 			if (rv != 0) {
    13386 				data[i] = NVM_INIT_CTRL_4_DEFAULT_I211;
    13387 				rv = 0;
    13388 			}
    13389 			break;
    13390 		case NVM_OFF_LED_1_CFG:
    13391 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    13392 			if (rv != 0) {
    13393 				data[i] = NVM_LED_1_CFG_DEFAULT_I211;
    13394 				rv = 0;
    13395 			}
    13396 			break;
    13397 		case NVM_OFF_LED_0_2_CFG:
    13398 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    13399 			if (rv != 0) {
    13400 				data[i] = NVM_LED_0_2_CFG_DEFAULT_I211;
    13401 				rv = 0;
    13402 			}
    13403 			break;
    13404 		case NVM_OFF_ID_LED_SETTINGS:
    13405 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    13406 			if (rv != 0) {
    13407 				data[i] = ID_LED_RESERVED_FFFF;
    13408 				rv = 0;
    13409 			}
    13410 			break;
    13411 		default:
    13412 			DPRINTF(WM_DEBUG_NVM,
    13413 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
    13414 			data[i] = NVM_RESERVED_WORD;
   13415 			break;
   13416 		}
   13417 	}
   13418 
   13419 	sc->nvm.release(sc);
   13420 	return rv;
   13421 }
   13422 
    13423 /* Locking, NVM type detection, checksum validation, version and read */
   13424 
   13425 static int
   13426 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13427 {
   13428 	uint32_t eecd = 0;
   13429 
   13430 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13431 	    || sc->sc_type == WM_T_82583) {
   13432 		eecd = CSR_READ(sc, WMREG_EECD);
   13433 
   13434 		/* Isolate bits 15 & 16 */
   13435 		eecd = ((eecd >> 15) & 0x03);
   13436 
   13437 		/* If both bits are set, device is Flash type */
   13438 		if (eecd == 0x03)
   13439 			return 0;
   13440 	}
   13441 	return 1;
   13442 }
   13443 
   13444 static int
   13445 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13446 {
   13447 	uint32_t eec;
   13448 
   13449 	eec = CSR_READ(sc, WMREG_EEC);
   13450 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13451 		return 1;
   13452 
   13453 	return 0;
   13454 }
   13455 
   13456 /*
   13457  * wm_nvm_validate_checksum
   13458  *
   13459  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13460  */
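          /*
           * Concretely (a sketch of the invariant, not driver code):
           *
           *	uint16_t sum = 0;
           *	for (i = 0; i < NVM_SIZE; i++)
           *		sum += word[i];
           *	sum == NVM_CHECKSUM
           *
           * The checksum word in the image is chosen at production time so
           * that the sum of all NVM_SIZE words comes out to NVM_CHECKSUM.
           */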
   13461 static int
   13462 wm_nvm_validate_checksum(struct wm_softc *sc)
   13463 {
   13464 	uint16_t checksum;
   13465 	uint16_t eeprom_data;
   13466 #ifdef WM_DEBUG
   13467 	uint16_t csum_wordaddr, valid_checksum;
   13468 #endif
   13469 	int i;
   13470 
   13471 	checksum = 0;
   13472 
   13473 	/* Don't check for I211 */
   13474 	if (sc->sc_type == WM_T_I211)
   13475 		return 0;
   13476 
   13477 #ifdef WM_DEBUG
   13478 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13479 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13480 		csum_wordaddr = NVM_OFF_COMPAT;
   13481 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13482 	} else {
   13483 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13484 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13485 	}
   13486 
   13487 	/* Dump EEPROM image for debug */
   13488 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13489 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13490 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13491 		/* XXX PCH_SPT? */
   13492 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13493 		if ((eeprom_data & valid_checksum) == 0)
   13494 			DPRINTF(WM_DEBUG_NVM,
   13495 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13496 				device_xname(sc->sc_dev), eeprom_data,
   13497 				    valid_checksum));
   13498 	}
   13499 
   13500 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13501 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13502 		for (i = 0; i < NVM_SIZE; i++) {
   13503 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13504 				printf("XXXX ");
   13505 			else
   13506 				printf("%04hx ", eeprom_data);
   13507 			if (i % 8 == 7)
   13508 				printf("\n");
   13509 		}
   13510 	}
   13511 
   13512 #endif /* WM_DEBUG */
   13513 
   13514 	for (i = 0; i < NVM_SIZE; i++) {
   13515 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13516 			return 1;
   13517 		checksum += eeprom_data;
   13518 	}
   13519 
   13520 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13521 #ifdef WM_DEBUG
   13522 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13523 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13524 #endif
   13525 	}
   13526 
   13527 	return 0;
   13528 }
   13529 
   13530 static void
   13531 wm_nvm_version_invm(struct wm_softc *sc)
   13532 {
   13533 	uint32_t dword;
   13534 
   13535 	/*
    13536 	 * Linux's code to decode the version is very strange, so we don't
    13537 	 * follow that algorithm and just use word 61 as the datasheet
    13538 	 * documents.  Perhaps it's not perfect though...
   13539 	 *
   13540 	 * Example:
   13541 	 *
   13542 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13543 	 */
   13544 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13545 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13546 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13547 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13548 }
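
          /*
           * Worked example for the decode above (a sketch; the exact field
           * widths are the INVM_* masks in the register header): with word 61
           * == 0x00800030, __SHIFTOUT(dword, INVM_VER_1) isolates the version
           * field, from which INVM_MAJOR yields 0 and INVM_MINOR yields 6,
           * i.e. the "0.6" mentioned in the comment.
           */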
   13549 
   13550 static void
   13551 wm_nvm_version(struct wm_softc *sc)
   13552 {
   13553 	uint16_t major, minor, build, patch;
   13554 	uint16_t uid0, uid1;
   13555 	uint16_t nvm_data;
   13556 	uint16_t off;
   13557 	bool check_version = false;
   13558 	bool check_optionrom = false;
   13559 	bool have_build = false;
   13560 	bool have_uid = true;
   13561 
   13562 	/*
   13563 	 * Version format:
   13564 	 *
   13565 	 * XYYZ
   13566 	 * X0YZ
   13567 	 * X0YY
   13568 	 *
   13569 	 * Example:
   13570 	 *
   13571 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13572 	 *	82571	0x50a6	5.10.6?
   13573 	 *	82572	0x506a	5.6.10?
   13574 	 *	82572EI	0x5069	5.6.9?
   13575 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13576 	 *		0x2013	2.1.3?
   13577 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13578 	 * ICH8+82567	0x0040	0.4.0?
   13579 	 * ICH9+82566	0x1040	1.4.0?
   13580 	 *ICH10+82567	0x0043	0.4.3?
   13581 	 *  PCH+82577	0x00c1	0.12.1?
   13582 	 * PCH2+82579	0x00d3	0.13.3?
   13583 	 *		0x00d4	0.13.4?
   13584 	 *  LPT+I218	0x0023	0.2.3?
   13585 	 *  SPT+I219	0x0084	0.8.4?
   13586 	 *  CNP+I219	0x0054	0.5.4?
   13587 	 */
   13588 
   13589 	/*
   13590 	 * XXX
    13591 	 * QEMU's e1000e emulation of the 82574L has an SPI ROM of only
    13592 	 * 64 words; I've never seen real 82574 hardware with such a
          	 * small SPI ROM.
   13593 	 */
   13594 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13595 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13596 		have_uid = false;
   13597 
   13598 	switch (sc->sc_type) {
   13599 	case WM_T_82571:
   13600 	case WM_T_82572:
   13601 	case WM_T_82574:
   13602 	case WM_T_82583:
   13603 		check_version = true;
   13604 		check_optionrom = true;
   13605 		have_build = true;
   13606 		break;
   13607 	case WM_T_ICH8:
   13608 	case WM_T_ICH9:
   13609 	case WM_T_ICH10:
   13610 	case WM_T_PCH:
   13611 	case WM_T_PCH2:
   13612 	case WM_T_PCH_LPT:
   13613 	case WM_T_PCH_SPT:
   13614 	case WM_T_PCH_CNP:
   13615 		check_version = true;
   13616 		have_build = true;
   13617 		have_uid = false;
   13618 		break;
   13619 	case WM_T_82575:
   13620 	case WM_T_82576:
   13621 	case WM_T_82580:
   13622 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13623 			check_version = true;
   13624 		break;
   13625 	case WM_T_I211:
   13626 		wm_nvm_version_invm(sc);
   13627 		have_uid = false;
   13628 		goto printver;
   13629 	case WM_T_I210:
   13630 		if (!wm_nvm_flash_presence_i210(sc)) {
   13631 			wm_nvm_version_invm(sc);
   13632 			have_uid = false;
   13633 			goto printver;
   13634 		}
   13635 		/* FALLTHROUGH */
   13636 	case WM_T_I350:
   13637 	case WM_T_I354:
   13638 		check_version = true;
   13639 		check_optionrom = true;
   13640 		break;
   13641 	default:
   13642 		return;
   13643 	}
   13644 	if (check_version
   13645 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13646 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13647 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13648 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13649 			build = nvm_data & NVM_BUILD_MASK;
   13650 			have_build = true;
   13651 		} else
   13652 			minor = nvm_data & 0x00ff;
   13653 
    13654 		/* Convert the BCD-coded minor (e.g. 0x10 -> 10) to decimal */
   13655 		minor = (minor / 16) * 10 + (minor % 16);
   13656 		sc->sc_nvm_ver_major = major;
   13657 		sc->sc_nvm_ver_minor = minor;
   13658 
   13659 printver:
   13660 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13661 		    sc->sc_nvm_ver_minor);
   13662 		if (have_build) {
   13663 			sc->sc_nvm_ver_build = build;
   13664 			aprint_verbose(".%d", build);
   13665 		}
   13666 	}
   13667 
    13668 	/* Assume the Option ROM area is above NVM_SIZE */
   13669 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13670 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13671 		/* Option ROM Version */
   13672 		if ((off != 0x0000) && (off != 0xffff)) {
   13673 			int rv;
   13674 
   13675 			off += NVM_COMBO_VER_OFF;
   13676 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13677 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13678 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13679 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13680 				/* 16bits */
   13681 				major = uid0 >> 8;
   13682 				build = (uid0 << 8) | (uid1 >> 8);
   13683 				patch = uid1 & 0x00ff;
   13684 				aprint_verbose(", option ROM Version %d.%d.%d",
   13685 				    major, build, patch);
   13686 			}
   13687 		}
   13688 	}
   13689 
   13690 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
    13691 		aprint_verbose(", Image Unique ID %08x",
          		    ((uint32_t)uid1 << 16) | uid0);
   13692 }
   13693 
   13694 /*
   13695  * wm_nvm_read:
   13696  *
   13697  *	Read data from the serial EEPROM.
   13698  */
   13699 static int
   13700 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13701 {
   13702 	int rv;
   13703 
   13704 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13705 		device_xname(sc->sc_dev), __func__));
   13706 
   13707 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13708 		return -1;
   13709 
   13710 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13711 
   13712 	return rv;
   13713 }
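
          /*
           * Example use (a sketch; cf. wm_read_mac_addr() for the real
           * thing): fetching the three MAC address words:
           *
           *	uint16_t myea[ETHER_ADDR_LEN / 2];
           *
           *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, __arraycount(myea),
           *	    myea) != 0)
           *		... handle an unreadable EEPROM ...
           *
           * The per-device nvm.read method acquires and releases the
           * appropriate semaphore itself.
           */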
   13714 
   13715 /*
   13716  * Hardware semaphores.
    13717  * Very complex...
   13718  */
   13719 
   13720 static int
   13721 wm_get_null(struct wm_softc *sc)
   13722 {
   13723 
   13724 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13725 		device_xname(sc->sc_dev), __func__));
   13726 	return 0;
   13727 }
   13728 
   13729 static void
   13730 wm_put_null(struct wm_softc *sc)
   13731 {
   13732 
   13733 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13734 		device_xname(sc->sc_dev), __func__));
   13735 	return;
   13736 }
   13737 
   13738 static int
   13739 wm_get_eecd(struct wm_softc *sc)
   13740 {
   13741 	uint32_t reg;
   13742 	int x;
   13743 
   13744 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13745 		device_xname(sc->sc_dev), __func__));
   13746 
   13747 	reg = CSR_READ(sc, WMREG_EECD);
   13748 
   13749 	/* Request EEPROM access. */
   13750 	reg |= EECD_EE_REQ;
   13751 	CSR_WRITE(sc, WMREG_EECD, reg);
   13752 
   13753 	/* ..and wait for it to be granted. */
   13754 	for (x = 0; x < 1000; x++) {
   13755 		reg = CSR_READ(sc, WMREG_EECD);
   13756 		if (reg & EECD_EE_GNT)
   13757 			break;
   13758 		delay(5);
   13759 	}
   13760 	if ((reg & EECD_EE_GNT) == 0) {
   13761 		aprint_error_dev(sc->sc_dev,
   13762 		    "could not acquire EEPROM GNT\n");
   13763 		reg &= ~EECD_EE_REQ;
   13764 		CSR_WRITE(sc, WMREG_EECD, reg);
   13765 		return -1;
   13766 	}
   13767 
   13768 	return 0;
   13769 }
   13770 
   13771 static void
   13772 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13773 {
   13774 
   13775 	*eecd |= EECD_SK;
   13776 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13777 	CSR_WRITE_FLUSH(sc);
   13778 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13779 		delay(1);
   13780 	else
   13781 		delay(50);
   13782 }
   13783 
   13784 static void
   13785 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13786 {
   13787 
   13788 	*eecd &= ~EECD_SK;
   13789 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13790 	CSR_WRITE_FLUSH(sc);
   13791 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13792 		delay(1);
   13793 	else
   13794 		delay(50);
   13795 }
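
          /*
           * Sketch of how the two clock helpers above are used to bit-bang
           * the serial EEPROM (cf. wm_eeprom_sendbits() earlier in this
           * file): each data bit is placed on EECD_DI and latched by a
           * raise/lower pair, roughly:
           *
           *	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
           *	if (bit)
           *		reg |= EECD_DI;
           *	CSR_WRITE(sc, WMREG_EECD, reg);
           *	CSR_WRITE_FLUSH(sc);
           *	wm_nvm_eec_clock_raise(sc, &reg);
           *	wm_nvm_eec_clock_lower(sc, &reg);
           */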
   13796 
   13797 static void
   13798 wm_put_eecd(struct wm_softc *sc)
   13799 {
   13800 	uint32_t reg;
   13801 
   13802 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13803 		device_xname(sc->sc_dev), __func__));
   13804 
   13805 	/* Stop nvm */
   13806 	reg = CSR_READ(sc, WMREG_EECD);
   13807 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13808 		/* Pull CS high */
   13809 		reg |= EECD_CS;
   13810 		wm_nvm_eec_clock_lower(sc, &reg);
   13811 	} else {
   13812 		/* CS on Microwire is active-high */
   13813 		reg &= ~(EECD_CS | EECD_DI);
   13814 		CSR_WRITE(sc, WMREG_EECD, reg);
   13815 		wm_nvm_eec_clock_raise(sc, &reg);
   13816 		wm_nvm_eec_clock_lower(sc, &reg);
   13817 	}
   13818 
   13819 	reg = CSR_READ(sc, WMREG_EECD);
   13820 	reg &= ~EECD_EE_REQ;
   13821 	CSR_WRITE(sc, WMREG_EECD, reg);
   13822 
   13823 	return;
   13824 }
   13825 
   13826 /*
   13827  * Get hardware semaphore.
   13828  * Same as e1000_get_hw_semaphore_generic()
   13829  */
   13830 static int
   13831 wm_get_swsm_semaphore(struct wm_softc *sc)
   13832 {
   13833 	int32_t timeout;
   13834 	uint32_t swsm;
   13835 
   13836 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13837 		device_xname(sc->sc_dev), __func__));
   13838 	KASSERT(sc->sc_nvm_wordsize > 0);
   13839 
   13840 retry:
   13841 	/* Get the SW semaphore. */
   13842 	timeout = sc->sc_nvm_wordsize + 1;
   13843 	while (timeout) {
   13844 		swsm = CSR_READ(sc, WMREG_SWSM);
   13845 
   13846 		if ((swsm & SWSM_SMBI) == 0)
   13847 			break;
   13848 
   13849 		delay(50);
   13850 		timeout--;
   13851 	}
   13852 
   13853 	if (timeout == 0) {
   13854 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13855 			/*
   13856 			 * In rare circumstances, the SW semaphore may already
   13857 			 * be held unintentionally. Clear the semaphore once
   13858 			 * before giving up.
   13859 			 */
   13860 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13861 			wm_put_swsm_semaphore(sc);
   13862 			goto retry;
   13863 		}
   13864 		aprint_error_dev(sc->sc_dev,
   13865 		    "could not acquire SWSM SMBI\n");
   13866 		return 1;
   13867 	}
   13868 
   13869 	/* Get the FW semaphore. */
   13870 	timeout = sc->sc_nvm_wordsize + 1;
   13871 	while (timeout) {
   13872 		swsm = CSR_READ(sc, WMREG_SWSM);
   13873 		swsm |= SWSM_SWESMBI;
   13874 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13875 		/* If we managed to set the bit we got the semaphore. */
   13876 		swsm = CSR_READ(sc, WMREG_SWSM);
   13877 		if (swsm & SWSM_SWESMBI)
   13878 			break;
   13879 
   13880 		delay(50);
   13881 		timeout--;
   13882 	}
   13883 
   13884 	if (timeout == 0) {
   13885 		aprint_error_dev(sc->sc_dev,
   13886 		    "could not acquire SWSM SWESMBI\n");
   13887 		/* Release semaphores */
   13888 		wm_put_swsm_semaphore(sc);
   13889 		return 1;
   13890 	}
   13891 	return 0;
   13892 }
   13893 
   13894 /*
   13895  * Put hardware semaphore.
   13896  * Same as e1000_put_hw_semaphore_generic()
   13897  */
   13898 static void
   13899 wm_put_swsm_semaphore(struct wm_softc *sc)
   13900 {
   13901 	uint32_t swsm;
   13902 
   13903 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13904 		device_xname(sc->sc_dev), __func__));
   13905 
   13906 	swsm = CSR_READ(sc, WMREG_SWSM);
   13907 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13908 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13909 }
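
          /*
           * Typical usage (a sketch): the get/put pair brackets any access
           * that must be serialized against firmware, as
           * wm_get_swfw_semaphore() below does:
           *
           *	if (wm_get_swsm_semaphore(sc) != 0)
           *		return -1;
           *	... access the shared resource ...
           *	wm_put_swsm_semaphore(sc);
           */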
   13910 
   13911 /*
   13912  * Get SW/FW semaphore.
   13913  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13914  */
   13915 static int
   13916 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13917 {
   13918 	uint32_t swfw_sync;
   13919 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13920 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13921 	int timeout;
   13922 
   13923 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13924 		device_xname(sc->sc_dev), __func__));
   13925 
   13926 	if (sc->sc_type == WM_T_80003)
   13927 		timeout = 50;
   13928 	else
   13929 		timeout = 200;
   13930 
   13931 	while (timeout) {
   13932 		if (wm_get_swsm_semaphore(sc)) {
   13933 			aprint_error_dev(sc->sc_dev,
   13934 			    "%s: failed to get semaphore\n",
   13935 			    __func__);
   13936 			return 1;
   13937 		}
   13938 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13939 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13940 			swfw_sync |= swmask;
   13941 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13942 			wm_put_swsm_semaphore(sc);
   13943 			return 0;
   13944 		}
   13945 		wm_put_swsm_semaphore(sc);
   13946 		delay(5000);
   13947 		timeout--;
   13948 	}
   13949 	device_printf(sc->sc_dev,
   13950 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13951 	    mask, swfw_sync);
   13952 	return 1;
   13953 }
   13954 
   13955 static void
   13956 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13957 {
   13958 	uint32_t swfw_sync;
   13959 
   13960 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13961 		device_xname(sc->sc_dev), __func__));
   13962 
   13963 	while (wm_get_swsm_semaphore(sc) != 0)
   13964 		continue;
   13965 
   13966 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13967 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13968 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13969 
   13970 	wm_put_swsm_semaphore(sc);
   13971 }
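
          /*
           * Example (a sketch): PHY accesses on 82575-class parts take the
           * per-function PHY bit (SWFW_PHY0_SM for function 0; see the
           * swfwphysem[] table used by wm_get_phy_82575() below):
           *
           *	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM) != 0)
           *		return -1;
           *	... MDIC register accesses ...
           *	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
           */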
   13972 
   13973 static int
   13974 wm_get_nvm_80003(struct wm_softc *sc)
   13975 {
   13976 	int rv;
   13977 
   13978 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13979 		device_xname(sc->sc_dev), __func__));
   13980 
   13981 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13982 		aprint_error_dev(sc->sc_dev,
   13983 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   13984 		return rv;
   13985 	}
   13986 
   13987 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13988 	    && (rv = wm_get_eecd(sc)) != 0) {
   13989 		aprint_error_dev(sc->sc_dev,
   13990 		    "%s: failed to get semaphore(EECD)\n", __func__);
   13991 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13992 		return rv;
   13993 	}
   13994 
   13995 	return 0;
   13996 }
   13997 
   13998 static void
   13999 wm_put_nvm_80003(struct wm_softc *sc)
   14000 {
   14001 
   14002 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14003 		device_xname(sc->sc_dev), __func__));
   14004 
   14005 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14006 		wm_put_eecd(sc);
   14007 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14008 }
   14009 
   14010 static int
   14011 wm_get_nvm_82571(struct wm_softc *sc)
   14012 {
   14013 	int rv;
   14014 
   14015 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14016 		device_xname(sc->sc_dev), __func__));
   14017 
   14018 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14019 		return rv;
   14020 
   14021 	switch (sc->sc_type) {
   14022 	case WM_T_82573:
   14023 		break;
   14024 	default:
   14025 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14026 			rv = wm_get_eecd(sc);
   14027 		break;
   14028 	}
   14029 
   14030 	if (rv != 0) {
   14031 		aprint_error_dev(sc->sc_dev,
   14032 		    "%s: failed to get semaphore\n",
   14033 		    __func__);
   14034 		wm_put_swsm_semaphore(sc);
   14035 	}
   14036 
   14037 	return rv;
   14038 }
   14039 
   14040 static void
   14041 wm_put_nvm_82571(struct wm_softc *sc)
   14042 {
   14043 
   14044 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14045 		device_xname(sc->sc_dev), __func__));
   14046 
   14047 	switch (sc->sc_type) {
   14048 	case WM_T_82573:
   14049 		break;
   14050 	default:
   14051 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14052 			wm_put_eecd(sc);
   14053 		break;
   14054 	}
   14055 
   14056 	wm_put_swsm_semaphore(sc);
   14057 }
   14058 
   14059 static int
   14060 wm_get_phy_82575(struct wm_softc *sc)
   14061 {
   14062 
   14063 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14064 		device_xname(sc->sc_dev), __func__));
   14065 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14066 }
   14067 
   14068 static void
   14069 wm_put_phy_82575(struct wm_softc *sc)
   14070 {
   14071 
   14072 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14073 		device_xname(sc->sc_dev), __func__));
   14074 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14075 }
   14076 
   14077 static int
   14078 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14079 {
   14080 	uint32_t ext_ctrl;
   14081 	int timeout = 200;
   14082 
   14083 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14084 		device_xname(sc->sc_dev), __func__));
   14085 
   14086 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14087 	for (timeout = 0; timeout < 200; timeout++) {
   14088 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14089 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14090 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14091 
   14092 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14093 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14094 			return 0;
   14095 		delay(5000);
   14096 	}
   14097 	device_printf(sc->sc_dev,
   14098 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14099 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14100 	return 1;
   14101 }
   14102 
   14103 static void
   14104 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14105 {
   14106 	uint32_t ext_ctrl;
   14107 
   14108 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14109 		device_xname(sc->sc_dev), __func__));
   14110 
   14111 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14112 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14113 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14114 
   14115 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14116 }
   14117 
   14118 static int
   14119 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14120 {
   14121 	uint32_t ext_ctrl;
   14122 	int timeout;
   14123 
   14124 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14125 		device_xname(sc->sc_dev), __func__));
   14126 	mutex_enter(sc->sc_ich_phymtx);
   14127 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14128 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14129 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14130 			break;
   14131 		delay(1000);
   14132 	}
   14133 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14134 		device_printf(sc->sc_dev,
   14135 		    "SW has already locked the resource\n");
   14136 		goto out;
   14137 	}
   14138 
   14139 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14140 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14141 	for (timeout = 0; timeout < 1000; timeout++) {
   14142 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14143 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14144 			break;
   14145 		delay(1000);
   14146 	}
   14147 	if (timeout >= 1000) {
   14148 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14149 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14150 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14151 		goto out;
   14152 	}
   14153 	return 0;
   14154 
   14155 out:
   14156 	mutex_exit(sc->sc_ich_phymtx);
   14157 	return 1;
   14158 }
   14159 
   14160 static void
   14161 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14162 {
   14163 	uint32_t ext_ctrl;
   14164 
   14165 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14166 		device_xname(sc->sc_dev), __func__));
   14167 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14168 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14169 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14170 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14171 	} else {
   14172 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14173 	}
   14174 
   14175 	mutex_exit(sc->sc_ich_phymtx);
   14176 }
   14177 
   14178 static int
   14179 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14180 {
   14181 
   14182 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14183 		device_xname(sc->sc_dev), __func__));
   14184 	mutex_enter(sc->sc_ich_nvmmtx);
   14185 
   14186 	return 0;
   14187 }
   14188 
   14189 static void
   14190 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14191 {
   14192 
   14193 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14194 		device_xname(sc->sc_dev), __func__));
   14195 	mutex_exit(sc->sc_ich_nvmmtx);
   14196 }
   14197 
   14198 static int
   14199 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14200 {
   14201 	int i = 0;
   14202 	uint32_t reg;
   14203 
   14204 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14205 		device_xname(sc->sc_dev), __func__));
   14206 
   14207 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14208 	do {
   14209 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14210 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14211 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14212 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14213 			break;
   14214 		delay(2*1000);
   14215 		i++;
   14216 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14217 
   14218 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14219 		wm_put_hw_semaphore_82573(sc);
   14220 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14221 		    device_xname(sc->sc_dev));
   14222 		return -1;
   14223 	}
   14224 
   14225 	return 0;
   14226 }
   14227 
   14228 static void
   14229 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14230 {
   14231 	uint32_t reg;
   14232 
   14233 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14234 		device_xname(sc->sc_dev), __func__));
   14235 
   14236 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14237 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14238 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14239 }
   14240 
   14241 /*
   14242  * Management mode and power management related subroutines.
   14243  * BMC, AMT, suspend/resume and EEE.
   14244  */
   14245 
   14246 #ifdef WM_WOL
   14247 static int
   14248 wm_check_mng_mode(struct wm_softc *sc)
   14249 {
   14250 	int rv;
   14251 
   14252 	switch (sc->sc_type) {
   14253 	case WM_T_ICH8:
   14254 	case WM_T_ICH9:
   14255 	case WM_T_ICH10:
   14256 	case WM_T_PCH:
   14257 	case WM_T_PCH2:
   14258 	case WM_T_PCH_LPT:
   14259 	case WM_T_PCH_SPT:
   14260 	case WM_T_PCH_CNP:
   14261 		rv = wm_check_mng_mode_ich8lan(sc);
   14262 		break;
   14263 	case WM_T_82574:
   14264 	case WM_T_82583:
   14265 		rv = wm_check_mng_mode_82574(sc);
   14266 		break;
   14267 	case WM_T_82571:
   14268 	case WM_T_82572:
   14269 	case WM_T_82573:
   14270 	case WM_T_80003:
   14271 		rv = wm_check_mng_mode_generic(sc);
   14272 		break;
   14273 	default:
    14274 		/* Nothing to do */
   14275 		rv = 0;
   14276 		break;
   14277 	}
   14278 
   14279 	return rv;
   14280 }
   14281 
   14282 static int
   14283 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14284 {
   14285 	uint32_t fwsm;
   14286 
   14287 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14288 
   14289 	if (((fwsm & FWSM_FW_VALID) != 0)
   14290 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14291 		return 1;
   14292 
   14293 	return 0;
   14294 }
   14295 
   14296 static int
   14297 wm_check_mng_mode_82574(struct wm_softc *sc)
   14298 {
   14299 	uint16_t data;
   14300 
   14301 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14302 
   14303 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14304 		return 1;
   14305 
   14306 	return 0;
   14307 }
   14308 
   14309 static int
   14310 wm_check_mng_mode_generic(struct wm_softc *sc)
   14311 {
   14312 	uint32_t fwsm;
   14313 
   14314 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14315 
   14316 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14317 		return 1;
   14318 
   14319 	return 0;
   14320 }
   14321 #endif /* WM_WOL */
   14322 
   14323 static int
   14324 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14325 {
   14326 	uint32_t manc, fwsm, factps;
   14327 
   14328 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14329 		return 0;
   14330 
   14331 	manc = CSR_READ(sc, WMREG_MANC);
   14332 
   14333 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14334 		device_xname(sc->sc_dev), manc));
   14335 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14336 		return 0;
   14337 
   14338 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14339 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14340 		factps = CSR_READ(sc, WMREG_FACTPS);
   14341 		if (((factps & FACTPS_MNGCG) == 0)
   14342 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14343 			return 1;
   14344 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14345 		uint16_t data;
   14346 
   14347 		factps = CSR_READ(sc, WMREG_FACTPS);
   14348 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14349 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14350 			device_xname(sc->sc_dev), factps, data));
   14351 		if (((factps & FACTPS_MNGCG) == 0)
   14352 		    && ((data & NVM_CFG2_MNGM_MASK)
   14353 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14354 			return 1;
   14355 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14356 	    && ((manc & MANC_ASF_EN) == 0))
   14357 		return 1;
   14358 
   14359 	return 0;
   14360 }
   14361 
   14362 static bool
   14363 wm_phy_resetisblocked(struct wm_softc *sc)
   14364 {
   14365 	bool blocked = false;
   14366 	uint32_t reg;
   14367 	int i = 0;
   14368 
   14369 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14370 		device_xname(sc->sc_dev), __func__));
   14371 
   14372 	switch (sc->sc_type) {
   14373 	case WM_T_ICH8:
   14374 	case WM_T_ICH9:
   14375 	case WM_T_ICH10:
   14376 	case WM_T_PCH:
   14377 	case WM_T_PCH2:
   14378 	case WM_T_PCH_LPT:
   14379 	case WM_T_PCH_SPT:
   14380 	case WM_T_PCH_CNP:
   14381 		do {
   14382 			reg = CSR_READ(sc, WMREG_FWSM);
   14383 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14384 				blocked = true;
   14385 				delay(10*1000);
   14386 				continue;
   14387 			}
   14388 			blocked = false;
   14389 		} while (blocked && (i++ < 30));
    14390 		return blocked;
   14392 	case WM_T_82571:
   14393 	case WM_T_82572:
   14394 	case WM_T_82573:
   14395 	case WM_T_82574:
   14396 	case WM_T_82583:
   14397 	case WM_T_80003:
    14398 		reg = CSR_READ(sc, WMREG_MANC);
    14399 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   14404 	default:
   14405 		/* No problem */
   14406 		break;
   14407 	}
   14408 
   14409 	return false;
   14410 }
   14411 
   14412 static void
   14413 wm_get_hw_control(struct wm_softc *sc)
   14414 {
   14415 	uint32_t reg;
   14416 
   14417 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14418 		device_xname(sc->sc_dev), __func__));
   14419 
   14420 	if (sc->sc_type == WM_T_82573) {
   14421 		reg = CSR_READ(sc, WMREG_SWSM);
   14422 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14423 	} else if (sc->sc_type >= WM_T_82571) {
   14424 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14425 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14426 	}
   14427 }
   14428 
   14429 static void
   14430 wm_release_hw_control(struct wm_softc *sc)
   14431 {
   14432 	uint32_t reg;
   14433 
   14434 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14435 		device_xname(sc->sc_dev), __func__));
   14436 
   14437 	if (sc->sc_type == WM_T_82573) {
   14438 		reg = CSR_READ(sc, WMREG_SWSM);
   14439 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14440 	} else if (sc->sc_type >= WM_T_82571) {
   14441 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14442 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14443 	}
   14444 }
   14445 
   14446 static void
   14447 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14448 {
   14449 	uint32_t reg;
   14450 
   14451 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14452 		device_xname(sc->sc_dev), __func__));
   14453 
   14454 	if (sc->sc_type < WM_T_PCH2)
   14455 		return;
   14456 
   14457 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14458 
   14459 	if (gate)
   14460 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14461 	else
   14462 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14463 
   14464 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14465 }
   14466 
   14467 static int
   14468 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14469 {
   14470 	uint32_t fwsm, reg;
   14471 	int rv = 0;
   14472 
   14473 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14474 		device_xname(sc->sc_dev), __func__));
   14475 
   14476 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14477 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14478 
   14479 	/* Disable ULP */
   14480 	wm_ulp_disable(sc);
   14481 
   14482 	/* Acquire PHY semaphore */
   14483 	rv = sc->phy.acquire(sc);
   14484 	if (rv != 0) {
   14485 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14486 		device_xname(sc->sc_dev), __func__));
   14487 		return -1;
   14488 	}
   14489 
   14490 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14491 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14492 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14493 	 */
   14494 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14495 	switch (sc->sc_type) {
   14496 	case WM_T_PCH_LPT:
   14497 	case WM_T_PCH_SPT:
   14498 	case WM_T_PCH_CNP:
   14499 		if (wm_phy_is_accessible_pchlan(sc))
   14500 			break;
   14501 
   14502 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14503 		 * forcing MAC to SMBus mode first.
   14504 		 */
   14505 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14506 		reg |= CTRL_EXT_FORCE_SMBUS;
   14507 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14508 #if 0
   14509 		/* XXX Isn't this required??? */
   14510 		CSR_WRITE_FLUSH(sc);
   14511 #endif
   14512 		/* Wait 50 milliseconds for MAC to finish any retries
   14513 		 * that it might be trying to perform from previous
   14514 		 * attempts to acknowledge any phy read requests.
   14515 		 */
   14516 		delay(50 * 1000);
   14517 		/* FALLTHROUGH */
   14518 	case WM_T_PCH2:
   14519 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14520 			break;
   14521 		/* FALLTHROUGH */
   14522 	case WM_T_PCH:
   14523 		if (sc->sc_type == WM_T_PCH)
   14524 			if ((fwsm & FWSM_FW_VALID) != 0)
   14525 				break;
   14526 
   14527 		if (wm_phy_resetisblocked(sc) == true) {
   14528 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14529 			break;
   14530 		}
   14531 
   14532 		/* Toggle LANPHYPC Value bit */
   14533 		wm_toggle_lanphypc_pch_lpt(sc);
   14534 
   14535 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14536 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14537 				break;
   14538 
   14539 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14540 			 * so ensure that the MAC is also out of SMBus mode
   14541 			 */
   14542 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14543 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14544 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14545 
   14546 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14547 				break;
   14548 			rv = -1;
   14549 		}
   14550 		break;
   14551 	default:
   14552 		break;
   14553 	}
   14554 
   14555 	/* Release semaphore */
   14556 	sc->phy.release(sc);
   14557 
   14558 	if (rv == 0) {
   14559 		/* Check to see if able to reset PHY.  Print error if not */
   14560 		if (wm_phy_resetisblocked(sc)) {
   14561 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14562 			goto out;
   14563 		}
   14564 
   14565 		/* Reset the PHY before any access to it.  Doing so, ensures
   14566 		 * that the PHY is in a known good state before we read/write
   14567 		 * PHY registers.  The generic reset is sufficient here,
   14568 		 * because we haven't determined the PHY type yet.
   14569 		 */
   14570 		if (wm_reset_phy(sc) != 0)
   14571 			goto out;
   14572 
    14573 		/* On a successful reset, possibly need to wait for the PHY
    14574 		 * to quiesce to an accessible state before returning control
    14575 		 * to the calling function.  If the PHY does not quiesce,
    14576 		 * report that the PHY reset is blocked, as that is the
    14577 		 * condition the PHY is in.
    14578 		 */
   14579 		if (wm_phy_resetisblocked(sc))
   14580 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14581 	}
   14582 
   14583 out:
   14584 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14585 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14586 		delay(10*1000);
   14587 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14588 	}
   14589 
	return rv;
   14591 }
   14592 
   14593 static void
   14594 wm_init_manageability(struct wm_softc *sc)
   14595 {
   14596 
   14597 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14598 		device_xname(sc->sc_dev), __func__));
   14599 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14600 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14601 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14602 
   14603 		/* Disable hardware interception of ARP */
   14604 		manc &= ~MANC_ARP_EN;
   14605 
   14606 		/* Enable receiving management packets to the host */
   14607 		if (sc->sc_type >= WM_T_82571) {
   14608 			manc |= MANC_EN_MNG2HOST;
   14609 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14610 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14611 		}
   14612 
   14613 		CSR_WRITE(sc, WMREG_MANC, manc);
   14614 	}
   14615 }
   14616 
   14617 static void
   14618 wm_release_manageability(struct wm_softc *sc)
   14619 {
   14620 
   14621 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14622 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14623 
   14624 		manc |= MANC_ARP_EN;
   14625 		if (sc->sc_type >= WM_T_82571)
   14626 			manc &= ~MANC_EN_MNG2HOST;
   14627 
   14628 		CSR_WRITE(sc, WMREG_MANC, manc);
   14629 	}
   14630 }
   14631 
   14632 static void
   14633 wm_get_wakeup(struct wm_softc *sc)
   14634 {
   14635 
   14636 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14637 	switch (sc->sc_type) {
   14638 	case WM_T_82573:
   14639 	case WM_T_82583:
   14640 		sc->sc_flags |= WM_F_HAS_AMT;
   14641 		/* FALLTHROUGH */
   14642 	case WM_T_80003:
   14643 	case WM_T_82575:
   14644 	case WM_T_82576:
   14645 	case WM_T_82580:
   14646 	case WM_T_I350:
   14647 	case WM_T_I354:
   14648 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14649 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14650 		/* FALLTHROUGH */
   14651 	case WM_T_82541:
   14652 	case WM_T_82541_2:
   14653 	case WM_T_82547:
   14654 	case WM_T_82547_2:
   14655 	case WM_T_82571:
   14656 	case WM_T_82572:
   14657 	case WM_T_82574:
   14658 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14659 		break;
   14660 	case WM_T_ICH8:
   14661 	case WM_T_ICH9:
   14662 	case WM_T_ICH10:
   14663 	case WM_T_PCH:
   14664 	case WM_T_PCH2:
   14665 	case WM_T_PCH_LPT:
   14666 	case WM_T_PCH_SPT:
   14667 	case WM_T_PCH_CNP:
   14668 		sc->sc_flags |= WM_F_HAS_AMT;
   14669 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14670 		break;
   14671 	default:
   14672 		break;
   14673 	}
   14674 
   14675 	/* 1: HAS_MANAGE */
   14676 	if (wm_enable_mng_pass_thru(sc) != 0)
   14677 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14678 
	/*
	 * Note that the WOL flag is set after the EEPROM reset code has
	 * run.
	 */
   14683 }
   14684 
/*
 * Unconfigure Ultra Low Power mode.
 * Only for PCH_LPT and newer, except some I217/I218 devices (see below).
 */
   14689 static int
   14690 wm_ulp_disable(struct wm_softc *sc)
   14691 {
   14692 	uint32_t reg;
   14693 	uint16_t phyreg;
   14694 	int i = 0, rv = 0;
   14695 
   14696 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14697 		device_xname(sc->sc_dev), __func__));
   14698 	/* Exclude old devices */
   14699 	if ((sc->sc_type < WM_T_PCH_LPT)
   14700 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14701 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14702 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14703 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14704 		return 0;
   14705 
   14706 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
		/* Request that the ME un-configure ULP mode in the PHY */
   14708 		reg = CSR_READ(sc, WMREG_H2ME);
   14709 		reg &= ~H2ME_ULP;
   14710 		reg |= H2ME_ENFORCE_SETTINGS;
   14711 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14712 
   14713 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14714 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14715 			if (i++ == 30) {
   14716 				device_printf(sc->sc_dev, "%s timed out\n",
   14717 				    __func__);
   14718 				return -1;
   14719 			}
   14720 			delay(10 * 1000);
   14721 		}
   14722 		reg = CSR_READ(sc, WMREG_H2ME);
   14723 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14724 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14725 
   14726 		return 0;
   14727 	}
   14728 
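	/*
	 * No manageability engine is present, so the host has to take the
	 * PHY out of ULP itself: toggle LANPHYPC, unforce SMBus mode in
	 * both the PHY and the MAC, re-enable K1, clear the sticky ULP
	 * configuration bits and commit the change with the START bit.
	 */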
   14729 	/* Acquire semaphore */
   14730 	rv = sc->phy.acquire(sc);
   14731 	if (rv != 0) {
   14732 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14733 		device_xname(sc->sc_dev), __func__));
   14734 		return -1;
   14735 	}
   14736 
   14737 	/* Toggle LANPHYPC */
   14738 	wm_toggle_lanphypc_pch_lpt(sc);
   14739 
   14740 	/* Unforce SMBus mode in PHY */
   14741 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14742 	if (rv != 0) {
   14743 		uint32_t reg2;
   14744 
   14745 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14746 			__func__);
   14747 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14748 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14749 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14750 		delay(50 * 1000);
   14751 
   14752 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14753 		    &phyreg);
   14754 		if (rv != 0)
   14755 			goto release;
   14756 	}
   14757 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14758 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14759 
   14760 	/* Unforce SMBus mode in MAC */
   14761 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14762 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14763 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14764 
   14765 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14766 	if (rv != 0)
   14767 		goto release;
   14768 	phyreg |= HV_PM_CTRL_K1_ENA;
   14769 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14770 
   14771 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14772 		&phyreg);
   14773 	if (rv != 0)
   14774 		goto release;
   14775 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14776 	    | I218_ULP_CONFIG1_STICKY_ULP
   14777 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14778 	    | I218_ULP_CONFIG1_WOL_HOST
   14779 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14780 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14781 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14782 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14783 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14784 	phyreg |= I218_ULP_CONFIG1_START;
   14785 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14786 
   14787 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14788 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14789 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14790 
   14791 release:
   14792 	/* Release semaphore */
   14793 	sc->phy.release(sc);
   14794 	wm_gmii_reset(sc);
   14795 	delay(50 * 1000);
   14796 
   14797 	return rv;
   14798 }
   14799 
   14800 /* WOL in the newer chipset interfaces (pchlan) */
   14801 static int
   14802 wm_enable_phy_wakeup(struct wm_softc *sc)
   14803 {
   14804 	device_t dev = sc->sc_dev;
   14805 	uint32_t mreg, moff;
   14806 	uint16_t wuce, wuc, wufc, preg;
   14807 	int i, rv;
   14808 
   14809 	KASSERT(sc->sc_type >= WM_T_PCH);
   14810 
   14811 	/* Copy MAC RARs to PHY RARs */
   14812 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14813 
   14814 	/* Activate PHY wakeup */
   14815 	rv = sc->phy.acquire(sc);
   14816 	if (rv != 0) {
   14817 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14818 		    __func__);
   14819 		return rv;
   14820 	}
   14821 
   14822 	/*
   14823 	 * Enable access to PHY wakeup registers.
   14824 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14825 	 */
   14826 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14827 	if (rv != 0) {
   14828 		device_printf(dev,
   14829 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14830 		goto release;
   14831 	}
   14832 
   14833 	/* Copy MAC MTA to PHY MTA */
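	/*
	 * The BM wakeup registers are 16 bits wide, so each 32-bit MTA
	 * entry is written as two 16-bit halves.
	 */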
   14834 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14835 		uint16_t lo, hi;
   14836 
   14837 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14838 		lo = (uint16_t)(mreg & 0xffff);
   14839 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14840 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14841 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14842 	}
   14843 
   14844 	/* Configure PHY Rx Control register */
   14845 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14846 	mreg = CSR_READ(sc, WMREG_RCTL);
   14847 	if (mreg & RCTL_UPE)
   14848 		preg |= BM_RCTL_UPE;
   14849 	if (mreg & RCTL_MPE)
   14850 		preg |= BM_RCTL_MPE;
   14851 	preg &= ~(BM_RCTL_MO_MASK);
   14852 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14853 	if (moff != 0)
   14854 		preg |= moff << BM_RCTL_MO_SHIFT;
   14855 	if (mreg & RCTL_BAM)
   14856 		preg |= BM_RCTL_BAM;
   14857 	if (mreg & RCTL_PMCF)
   14858 		preg |= BM_RCTL_PMCF;
   14859 	mreg = CSR_READ(sc, WMREG_CTRL);
   14860 	if (mreg & CTRL_RFCE)
   14861 		preg |= BM_RCTL_RFCE;
   14862 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14863 
   14864 	wuc = WUC_APME | WUC_PME_EN;
   14865 	wufc = WUFC_MAG;
   14866 	/* Enable PHY wakeup in MAC register */
   14867 	CSR_WRITE(sc, WMREG_WUC,
   14868 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14869 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14870 
   14871 	/* Configure and enable PHY wakeup in PHY registers */
   14872 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14873 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14874 
   14875 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14876 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14877 
   14878 release:
   14879 	sc->phy.release(sc);
   14880 
	return rv;
   14882 }
   14883 
   14884 /* Power down workaround on D3 */
   14885 static void
   14886 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14887 {
   14888 	uint32_t reg;
   14889 	uint16_t phyreg;
   14890 	int i;
   14891 
   14892 	for (i = 0; i < 2; i++) {
   14893 		/* Disable link */
   14894 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14895 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14896 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14897 
   14898 		/*
   14899 		 * Call gig speed drop workaround on Gig disable before
   14900 		 * accessing any PHY registers
   14901 		 */
   14902 		if (sc->sc_type == WM_T_ICH8)
   14903 			wm_gig_downshift_workaround_ich8lan(sc);
   14904 
   14905 		/* Write VR power-down enable */
   14906 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14907 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14908 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14909 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14910 
   14911 		/* Read it back and test */
   14912 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14913 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14914 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14915 			break;
   14916 
   14917 		/* Issue PHY reset and repeat at most one more time */
   14918 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14919 	}
   14920 }
   14921 
   14922 /*
   14923  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14924  *  @sc: pointer to the HW structure
   14925  *
   14926  *  During S0 to Sx transition, it is possible the link remains at gig
   14927  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14928  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14929  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14930  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14931  *  needs to be written.
 *  Parts that support (and are linked to a partner which supports) EEE in
   14933  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14934  *  than 10Mbps w/o EEE.
   14935  */
   14936 static void
   14937 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14938 {
   14939 	device_t dev = sc->sc_dev;
   14940 	struct ethercom *ec = &sc->sc_ethercom;
   14941 	uint32_t phy_ctrl;
   14942 	int rv;
   14943 
   14944 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14945 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14946 
   14947 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   14948 
   14949 	if (sc->sc_phytype == WMPHY_I217) {
   14950 		uint16_t devid = sc->sc_pcidevid;
   14951 
   14952 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14953 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14954 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14955 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14956 		    (sc->sc_type >= WM_T_PCH_SPT))
   14957 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14958 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14959 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14960 
   14961 		if (sc->phy.acquire(sc) != 0)
   14962 			goto out;
   14963 
   14964 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14965 			uint16_t eee_advert;
   14966 
   14967 			rv = wm_read_emi_reg_locked(dev,
   14968 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   14969 			if (rv)
   14970 				goto release;
   14971 
   14972 			/*
   14973 			 * Disable LPLU if both link partners support 100BaseT
   14974 			 * EEE and 100Full is advertised on both ends of the
   14975 			 * link, and enable Auto Enable LPI since there will
   14976 			 * be no driver to enable LPI while in Sx.
   14977 			 */
   14978 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   14979 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   14980 				uint16_t anar, phy_reg;
   14981 
   14982 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   14983 				    &anar);
   14984 				if (anar & ANAR_TX_FD) {
   14985 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   14986 					    PHY_CTRL_NOND0A_LPLU);
   14987 
   14988 					/* Set Auto Enable LPI after link up */
   14989 					sc->phy.readreg_locked(dev, 2,
   14990 					    I217_LPI_GPIO_CTRL, &phy_reg);
   14991 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14992 					sc->phy.writereg_locked(dev, 2,
   14993 					    I217_LPI_GPIO_CTRL, phy_reg);
   14994 				}
   14995 			}
   14996 		}
   14997 
   14998 		/*
   14999 		 * For i217 Intel Rapid Start Technology support,
   15000 		 * when the system is going into Sx and no manageability engine
   15001 		 * is present, the driver must configure proxy to reset only on
   15002 		 * power good.	LPI (Low Power Idle) state must also reset only
   15003 		 * on power good, as well as the MTA (Multicast table array).
   15004 		 * The SMBus release must also be disabled on LCD reset.
   15005 		 */
   15006 
   15007 		/*
   15008 		 * Enable MTA to reset for Intel Rapid Start Technology
   15009 		 * Support
   15010 		 */
   15011 
   15012 release:
   15013 		sc->phy.release(sc);
   15014 	}
   15015 out:
   15016 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15017 
   15018 	if (sc->sc_type == WM_T_ICH8)
   15019 		wm_gig_downshift_workaround_ich8lan(sc);
   15020 
   15021 	if (sc->sc_type >= WM_T_PCH) {
   15022 		wm_oem_bits_config_ich8lan(sc, false);
   15023 
   15024 		/* Reset PHY to activate OEM bits on 82577/8 */
   15025 		if (sc->sc_type == WM_T_PCH)
   15026 			wm_reset_phy(sc);
   15027 
   15028 		if (sc->phy.acquire(sc) != 0)
   15029 			return;
   15030 		wm_write_smbus_addr(sc);
   15031 		sc->phy.release(sc);
   15032 	}
   15033 }
   15034 
   15035 /*
   15036  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15037  *  @sc: pointer to the HW structure
   15038  *
   15039  *  During Sx to S0 transitions on non-managed devices or managed devices
   15040  *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   15042  *  the PHY.
   15043  *  On i217, setup Intel Rapid Start Technology.
   15044  */
   15045 static int
   15046 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15047 {
   15048 	device_t dev = sc->sc_dev;
   15049 	int rv;
   15050 
   15051 	if (sc->sc_type < WM_T_PCH2)
   15052 		return 0;
   15053 
   15054 	rv = wm_init_phy_workarounds_pchlan(sc);
   15055 	if (rv != 0)
   15056 		return -1;
   15057 
	/* For i217 Intel Rapid Start Technology support: when the system
	 * is transitioning from Sx and no manageability engine is present,
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
   15063 	if (sc->sc_phytype == WMPHY_I217) {
   15064 		uint16_t phy_reg;
   15065 
   15066 		if (sc->phy.acquire(sc) != 0)
   15067 			return -1;
   15068 
   15069 		/* Clear Auto Enable LPI after link up */
   15070 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15071 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15072 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15073 
   15074 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15075 			/* Restore clear on SMB if no manageability engine
   15076 			 * is present
   15077 			 */
   15078 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15079 			    &phy_reg);
   15080 			if (rv != 0)
   15081 				goto release;
   15082 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15083 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15084 
   15085 			/* Disable Proxy */
   15086 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15087 		}
   15088 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15090 		if (rv != 0)
   15091 			goto release;
   15092 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15093 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15094 
   15095 release:
   15096 		sc->phy.release(sc);
   15097 		return rv;
   15098 	}
   15099 
   15100 	return 0;
   15101 }
   15102 
   15103 static void
   15104 wm_enable_wakeup(struct wm_softc *sc)
   15105 {
   15106 	uint32_t reg, pmreg;
   15107 	pcireg_t pmode;
   15108 	int rv = 0;
   15109 
   15110 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15111 		device_xname(sc->sc_dev), __func__));
   15112 
   15113 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15114 	    &pmreg, NULL) == 0)
   15115 		return;
   15116 
   15117 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15118 		goto pme;
   15119 
   15120 	/* Advertise the wakeup capability */
   15121 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15122 	    | CTRL_SWDPIN(3));
   15123 
   15124 	/* Keep the laser running on fiber adapters */
   15125 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15126 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15127 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15128 		reg |= CTRL_EXT_SWDPIN(3);
   15129 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15130 	}
   15131 
   15132 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15133 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15134 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15135 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15136 		wm_suspend_workarounds_ich8lan(sc);
   15137 
   15138 #if 0	/* For the multicast packet */
   15139 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15140 	reg |= WUFC_MC;
   15141 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15142 #endif
   15143 
   15144 	if (sc->sc_type >= WM_T_PCH) {
   15145 		rv = wm_enable_phy_wakeup(sc);
   15146 		if (rv != 0)
   15147 			goto pme;
   15148 	} else {
   15149 		/* Enable wakeup by the MAC */
   15150 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15151 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15152 	}
   15153 
   15154 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15155 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15156 		|| (sc->sc_type == WM_T_PCH2))
   15157 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15158 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15159 
   15160 pme:
   15161 	/* Request PME */
   15162 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15163 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15164 		/* For WOL */
   15165 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15166 	} else {
   15167 		/* Disable WOL */
   15168 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15169 	}
   15170 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15171 }
   15172 
   15173 /* Disable ASPM L0s and/or L1 for workaround */
   15174 static void
   15175 wm_disable_aspm(struct wm_softc *sc)
   15176 {
   15177 	pcireg_t reg, mask = 0;
	const char *str = "";
   15179 
	/*
	 * Only for PCIe devices that have the PCIe capability in their
	 * PCI config space.
	 */
   15184 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15185 		return;
   15186 
   15187 	switch (sc->sc_type) {
   15188 	case WM_T_82571:
   15189 	case WM_T_82572:
   15190 		/*
   15191 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15192 		 * State Power management L1 State (ASPM L1).
   15193 		 */
   15194 		mask = PCIE_LCSR_ASPM_L1;
   15195 		str = "L1 is";
   15196 		break;
   15197 	case WM_T_82573:
   15198 	case WM_T_82574:
   15199 	case WM_T_82583:
		/*
		 * The 82573 disappears when PCIe ASPM L0s is enabled.
		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The documents for the 82574 and 82583 say
		 * that disabling L0s on those specific chipsets is
		 * sufficient, but we follow what the Intel em driver does.
		 *
		 * References:
		 * Errata 8 of the Specification Update of i82573.
		 * Errata 20 of the Specification Update of i82574.
		 * Errata 9 of the Specification Update of i82583.
		 */
   15213 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15214 		str = "L0s and L1 are";
   15215 		break;
   15216 	default:
   15217 		return;
   15218 	}
   15219 
   15220 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15221 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15222 	reg &= ~mask;
   15223 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15224 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15225 
   15226 	/* Print only in wm_attach() */
   15227 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15228 		aprint_verbose_dev(sc->sc_dev,
   15229 		    "ASPM %s disabled to workaround the errata.\n", str);
   15230 }
   15231 
   15232 /* LPLU */
   15233 
   15234 static void
   15235 wm_lplu_d0_disable(struct wm_softc *sc)
   15236 {
   15237 	struct mii_data *mii = &sc->sc_mii;
   15238 	uint32_t reg;
   15239 	uint16_t phyval;
   15240 
   15241 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15242 		device_xname(sc->sc_dev), __func__));
   15243 
   15244 	if (sc->sc_phytype == WMPHY_IFE)
   15245 		return;
   15246 
   15247 	switch (sc->sc_type) {
   15248 	case WM_T_82571:
   15249 	case WM_T_82572:
   15250 	case WM_T_82573:
   15251 	case WM_T_82575:
   15252 	case WM_T_82576:
   15253 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15254 		phyval &= ~PMR_D0_LPLU;
   15255 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15256 		break;
   15257 	case WM_T_82580:
   15258 	case WM_T_I350:
   15259 	case WM_T_I210:
   15260 	case WM_T_I211:
   15261 		reg = CSR_READ(sc, WMREG_PHPM);
   15262 		reg &= ~PHPM_D0A_LPLU;
   15263 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15264 		break;
   15265 	case WM_T_82574:
   15266 	case WM_T_82583:
   15267 	case WM_T_ICH8:
   15268 	case WM_T_ICH9:
   15269 	case WM_T_ICH10:
   15270 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15271 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15272 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15273 		CSR_WRITE_FLUSH(sc);
   15274 		break;
   15275 	case WM_T_PCH:
   15276 	case WM_T_PCH2:
   15277 	case WM_T_PCH_LPT:
   15278 	case WM_T_PCH_SPT:
   15279 	case WM_T_PCH_CNP:
   15280 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15281 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15282 		if (wm_phy_resetisblocked(sc) == false)
   15283 			phyval |= HV_OEM_BITS_ANEGNOW;
   15284 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15285 		break;
   15286 	default:
   15287 		break;
   15288 	}
   15289 }
   15290 
   15291 /* EEE */
   15292 
   15293 static int
   15294 wm_set_eee_i350(struct wm_softc *sc)
   15295 {
   15296 	struct ethercom *ec = &sc->sc_ethercom;
   15297 	uint32_t ipcnfg, eeer;
   15298 	uint32_t ipcnfg_mask
   15299 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15300 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15301 
   15302 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15303 
   15304 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15305 	eeer = CSR_READ(sc, WMREG_EEER);
   15306 
   15307 	/* Enable or disable per user setting */
   15308 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15309 		ipcnfg |= ipcnfg_mask;
   15310 		eeer |= eeer_mask;
   15311 	} else {
   15312 		ipcnfg &= ~ipcnfg_mask;
   15313 		eeer &= ~eeer_mask;
   15314 	}
   15315 
   15316 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15317 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15318 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15319 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15320 
   15321 	return 0;
   15322 }
   15323 
   15324 static int
   15325 wm_set_eee_pchlan(struct wm_softc *sc)
   15326 {
   15327 	device_t dev = sc->sc_dev;
   15328 	struct ethercom *ec = &sc->sc_ethercom;
   15329 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15330 	int rv = 0;
   15331 
   15332 	switch (sc->sc_phytype) {
   15333 	case WMPHY_82579:
   15334 		lpa = I82579_EEE_LP_ABILITY;
   15335 		pcs_status = I82579_EEE_PCS_STATUS;
   15336 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15337 		break;
   15338 	case WMPHY_I217:
   15339 		lpa = I217_EEE_LP_ABILITY;
   15340 		pcs_status = I217_EEE_PCS_STATUS;
   15341 		adv_addr = I217_EEE_ADVERTISEMENT;
   15342 		break;
   15343 	default:
   15344 		return 0;
   15345 	}
   15346 
   15347 	if (sc->phy.acquire(sc)) {
   15348 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15349 		return 0;
   15350 	}
   15351 
   15352 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15353 	if (rv != 0)
   15354 		goto release;
   15355 
   15356 	/* Clear bits that enable EEE in various speeds */
   15357 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15358 
   15359 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15360 		/* Save off link partner's EEE ability */
   15361 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15362 		if (rv != 0)
   15363 			goto release;
   15364 
   15365 		/* Read EEE advertisement */
   15366 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15367 			goto release;
   15368 
   15369 		/*
   15370 		 * Enable EEE only for speeds in which the link partner is
   15371 		 * EEE capable and for which we advertise EEE.
   15372 		 */
   15373 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15374 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15375 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15376 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15377 			if ((data & ANLPAR_TX_FD) != 0)
   15378 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15379 			else {
   15380 				/*
   15381 				 * EEE is not supported in 100Half, so ignore
   15382 				 * partner's EEE in 100 ability if full-duplex
   15383 				 * is not advertised.
   15384 				 */
   15385 				sc->eee_lp_ability
   15386 				    &= ~AN_EEEADVERT_100_TX;
   15387 			}
   15388 		}
   15389 	}
   15390 
   15391 	if (sc->sc_phytype == WMPHY_82579) {
   15392 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15393 		if (rv != 0)
   15394 			goto release;
   15395 
   15396 		data &= ~I82579_LPI_PLL_SHUT_100;
   15397 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15398 	}
   15399 
   15400 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15401 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15402 		goto release;
   15403 
   15404 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15405 release:
   15406 	sc->phy.release(sc);
   15407 
   15408 	return rv;
   15409 }
   15410 
   15411 static int
   15412 wm_set_eee(struct wm_softc *sc)
   15413 {
   15414 	struct ethercom *ec = &sc->sc_ethercom;
   15415 
   15416 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15417 		return 0;
   15418 
   15419 	if (sc->sc_type == WM_T_I354) {
   15420 		/* I354 uses an external PHY */
   15421 		return 0; /* not yet */
   15422 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15423 		return wm_set_eee_i350(sc);
   15424 	else if (sc->sc_type >= WM_T_PCH2)
   15425 		return wm_set_eee_pchlan(sc);
   15426 
   15427 	return 0;
   15428 }
   15429 
   15430 /*
   15431  * Workarounds (mainly PHY related).
 * Basically, PHY workarounds belong in the PHY drivers.
   15433  */
   15434 
   15435 /* Work-around for 82566 Kumeran PCS lock loss */
   15436 static int
   15437 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15438 {
   15439 	struct mii_data *mii = &sc->sc_mii;
   15440 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15441 	int i, reg, rv;
   15442 	uint16_t phyreg;
   15443 
   15444 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15445 		device_xname(sc->sc_dev), __func__));
   15446 
   15447 	/* If the link is not up, do nothing */
   15448 	if ((status & STATUS_LU) == 0)
   15449 		return 0;
   15450 
   15451 	/* Nothing to do if the link is other than 1Gbps */
   15452 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15453 		return 0;
   15454 
   15455 	for (i = 0; i < 10; i++) {
   15456 		/* read twice */
   15457 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15458 		if (rv != 0)
   15459 			return rv;
   15460 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15461 		if (rv != 0)
   15462 			return rv;
   15463 
   15464 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15465 			goto out;	/* GOOD! */
   15466 
   15467 		/* Reset the PHY */
   15468 		wm_reset_phy(sc);
   15469 		delay(5*1000);
   15470 	}
   15471 
   15472 	/* Disable GigE link negotiation */
   15473 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15474 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15475 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15476 
   15477 	/*
   15478 	 * Call gig speed drop workaround on Gig disable before accessing
   15479 	 * any PHY registers.
   15480 	 */
   15481 	wm_gig_downshift_workaround_ich8lan(sc);
   15482 
   15483 out:
   15484 	return 0;
   15485 }
   15486 
   15487 /*
   15488  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15489  *  @sc: pointer to the HW structure
   15490  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15492  *  LPLU, Gig disable, MDIC PHY reset):
   15493  *    1) Set Kumeran Near-end loopback
   15494  *    2) Clear Kumeran Near-end loopback
   15495  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15496  */
   15497 static void
   15498 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15499 {
   15500 	uint16_t kmreg;
   15501 
   15502 	/* Only for igp3 */
   15503 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15504 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15505 			return;
   15506 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15507 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15508 			return;
   15509 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15510 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15511 	}
   15512 }
   15513 
   15514 /*
   15515  * Workaround for pch's PHYs
   15516  * XXX should be moved to new PHY driver?
   15517  */
   15518 static int
   15519 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15520 {
   15521 	device_t dev = sc->sc_dev;
   15522 	struct mii_data *mii = &sc->sc_mii;
   15523 	struct mii_softc *child;
   15524 	uint16_t phy_data, phyrev = 0;
   15525 	int phytype = sc->sc_phytype;
   15526 	int rv;
   15527 
   15528 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15529 		device_xname(dev), __func__));
   15530 	KASSERT(sc->sc_type == WM_T_PCH);
   15531 
   15532 	/* Set MDIO slow mode before any other MDIO access */
   15533 	if (phytype == WMPHY_82577)
   15534 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15535 			return rv;
   15536 
   15537 	child = LIST_FIRST(&mii->mii_phys);
   15538 	if (child != NULL)
   15539 		phyrev = child->mii_mpd_rev;
   15540 
	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
   15542 	if ((child != NULL) &&
   15543 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15544 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15545 		/* Disable generation of early preamble (0x4431) */
   15546 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15547 		    &phy_data);
   15548 		if (rv != 0)
   15549 			return rv;
   15550 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15551 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15552 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15553 		    phy_data);
   15554 		if (rv != 0)
   15555 			return rv;
   15556 
   15557 		/* Preamble tuning for SSC */
   15558 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15559 		if (rv != 0)
   15560 			return rv;
   15561 	}
   15562 
   15563 	/* 82578 */
   15564 	if (phytype == WMPHY_82578) {
   15565 		/*
   15566 		 * Return registers to default by doing a soft reset then
   15567 		 * writing 0x3140 to the control register
   15568 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15569 		 */
   15570 		if ((child != NULL) && (phyrev < 2)) {
   15571 			PHY_RESET(child);
   15572 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15573 			if (rv != 0)
   15574 				return rv;
   15575 		}
   15576 	}
   15577 
   15578 	/* Select page 0 */
   15579 	if ((rv = sc->phy.acquire(sc)) != 0)
   15580 		return rv;
   15581 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15582 	sc->phy.release(sc);
   15583 	if (rv != 0)
   15584 		return rv;
   15585 
   15586 	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled when the link is at 1Gbps.
   15589 	 */
   15590 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15591 		return rv;
   15592 
   15593 	/* Workaround for link disconnects on a busy hub in half duplex */
   15594 	rv = sc->phy.acquire(sc);
   15595 	if (rv)
   15596 		return rv;
   15597 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15598 	if (rv)
   15599 		goto release;
   15600 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15601 	    phy_data & 0x00ff);
   15602 	if (rv)
   15603 		goto release;
   15604 
   15605 	/* Set MSE higher to enable link to stay up when noise is high */
   15606 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15607 release:
   15608 	sc->phy.release(sc);
   15609 
   15610 	return rv;
   15611 }
   15612 
   15613 /*
   15614  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15615  *  @sc:   pointer to the HW structure
   15616  */
   15617 static void
   15618 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15619 {
   15620 	device_t dev = sc->sc_dev;
   15621 	uint32_t mac_reg;
   15622 	uint16_t i, wuce;
   15623 	int count;
   15624 
   15625 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15626 		device_xname(sc->sc_dev), __func__));
   15627 
   15628 	if (sc->phy.acquire(sc) != 0)
   15629 		return;
   15630 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15631 		goto release;
   15632 
   15633 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15634 	count = wm_rar_count(sc);
   15635 	for (i = 0; i < count; i++) {
   15636 		uint16_t lo, hi;
   15637 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15638 		lo = (uint16_t)(mac_reg & 0xffff);
   15639 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15640 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15641 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15642 
   15643 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15644 		lo = (uint16_t)(mac_reg & 0xffff);
   15645 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15646 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15647 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15648 	}
   15649 
   15650 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15651 
   15652 release:
   15653 	sc->phy.release(sc);
   15654 }
   15655 
   15656 /*
   15657  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15658  *  done after every PHY reset.
   15659  */
   15660 static int
   15661 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15662 {
   15663 	device_t dev = sc->sc_dev;
   15664 	int rv;
   15665 
   15666 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15667 		device_xname(dev), __func__));
   15668 	KASSERT(sc->sc_type == WM_T_PCH2);
   15669 
   15670 	/* Set MDIO slow mode before any other MDIO access */
   15671 	rv = wm_set_mdio_slow_mode_hv(sc);
   15672 	if (rv != 0)
   15673 		return rv;
   15674 
   15675 	rv = sc->phy.acquire(sc);
   15676 	if (rv != 0)
   15677 		return rv;
   15678 	/* Set MSE higher to enable link to stay up when noise is high */
   15679 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15680 	if (rv != 0)
   15681 		goto release;
	/* Drop the link after the MSE threshold has been reached 5 times */
   15683 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15684 release:
   15685 	sc->phy.release(sc);
   15686 
   15687 	return rv;
   15688 }
   15689 
   15690 /**
   15691  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15692  *  @link: link up bool flag
   15693  *
   15694  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15695  *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
   15697  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15698  *  speeds in order to avoid Tx hangs.
   15699  **/
   15700 static int
   15701 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15702 {
   15703 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15704 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15705 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15706 	uint16_t phyreg;
   15707 
   15708 	if (link && (speed == STATUS_SPEED_1000)) {
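		/*
		 * Link is up at 1Gbps: disable K1 in the Kumeran interface
		 * and keep the PLL clock request asserted
		 * (FEXTNVM6_REQ_PLL_CLK) so the clock request is never
		 * de-asserted at 1Gbps (see the function comment above).
		 */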
		if (sc->phy.acquire(sc) != 0)
			return -1;
		int rv = wm_kmrn_readreg_locked(sc,
		    KUMCTRLSTA_OFFSET_K1_CONFIG, &phyreg);
   15712 		if (rv != 0)
   15713 			goto release;
   15714 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15715 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15716 		if (rv != 0)
   15717 			goto release;
   15718 		delay(20);
   15719 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15720 
   15721 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15722 		    &phyreg);
   15723 release:
   15724 		sc->phy.release(sc);
   15725 		return rv;
   15726 	}
   15727 
   15728 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15729 
   15730 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15731 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15732 	    || !link
   15733 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15734 		goto update_fextnvm6;
   15735 
   15736 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15737 
   15738 	/* Clear link status transmit timeout */
   15739 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15740 	if (speed == STATUS_SPEED_100) {
   15741 		/* Set inband Tx timeout to 5x10us for 100Half */
   15742 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15743 
   15744 		/* Do not extend the K1 entry latency for 100Half */
   15745 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15746 	} else {
   15747 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15748 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15749 
   15750 		/* Extend the K1 entry latency for 10 Mbps */
   15751 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15752 	}
   15753 
   15754 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15755 
   15756 update_fextnvm6:
   15757 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15758 	return 0;
   15759 }
   15760 
   15761 /*
   15762  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15763  *  @sc:   pointer to the HW structure
   15764  *  @link: link up bool flag
   15765  *
   15766  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15768  *  If link is down, the function will restore the default K1 setting located
   15769  *  in the NVM.
   15770  */
   15771 static int
   15772 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15773 {
   15774 	int k1_enable = sc->sc_nvm_k1_enabled;
   15775 
   15776 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15777 		device_xname(sc->sc_dev), __func__));
   15778 
   15779 	if (sc->phy.acquire(sc) != 0)
   15780 		return -1;
   15781 
   15782 	if (link) {
   15783 		k1_enable = 0;
   15784 
   15785 		/* Link stall fix for link up */
   15786 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15787 		    0x0100);
   15788 	} else {
   15789 		/* Link stall fix for link down */
   15790 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15791 		    0x4100);
   15792 	}
   15793 
   15794 	wm_configure_k1_ich8lan(sc, k1_enable);
   15795 	sc->phy.release(sc);
   15796 
   15797 	return 0;
   15798 }
   15799 
   15800 /*
   15801  *  wm_k1_workaround_lv - K1 Si workaround
   15802  *  @sc:   pointer to the HW structure
   15803  *
 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
 *  Disable K1 for 1000 and 100 speeds.
   15806  */
   15807 static int
   15808 wm_k1_workaround_lv(struct wm_softc *sc)
   15809 {
   15810 	uint32_t reg;
   15811 	uint16_t phyreg;
   15812 	int rv;
   15813 
   15814 	if (sc->sc_type != WM_T_PCH2)
   15815 		return 0;
   15816 
   15817 	/* Set K1 beacon duration based on 10Mbps speed */
   15818 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15819 	if (rv != 0)
   15820 		return rv;
   15821 
   15822 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15823 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15824 		if (phyreg &
   15825 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   15827 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15828 			    &phyreg);
   15829 			if (rv != 0)
   15830 				return rv;
   15831 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15832 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15833 			    phyreg);
   15834 			if (rv != 0)
   15835 				return rv;
   15836 		} else {
   15837 			/* For 10Mbps */
   15838 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15839 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15840 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15841 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15842 		}
   15843 	}
   15844 
   15845 	return 0;
   15846 }
   15847 
   15848 /*
   15849  *  wm_link_stall_workaround_hv - Si workaround
   15850  *  @sc: pointer to the HW structure
   15851  *
   15852  *  This function works around a Si bug where the link partner can get
   15853  *  a link up indication before the PHY does. If small packets are sent
   15854  *  by the link partner they can be placed in the packet buffer without
   15855  *  being properly accounted for by the PHY and will stall preventing
   15856  *  further packets from being received.  The workaround is to clear the
   15857  *  packet buffer after the PHY detects link up.
   15858  */
   15859 static int
   15860 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15861 {
   15862 	uint16_t phyreg;
   15863 
   15864 	if (sc->sc_phytype != WMPHY_82578)
   15865 		return 0;
   15866 
	/* Don't apply the workaround if the PHY is in loopback (bit 14 set) */
   15868 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15869 	if ((phyreg & BMCR_LOOP) != 0)
   15870 		return 0;
   15871 
   15872 	/* Check if link is up and at 1Gbps */
   15873 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15874 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15875 	    | BM_CS_STATUS_SPEED_MASK;
   15876 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15877 		| BM_CS_STATUS_SPEED_1000))
   15878 		return 0;
   15879 
   15880 	delay(200 * 1000);	/* XXX too big */
   15881 
   15882 	/* Flush the packets in the fifo buffer */
   15883 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15884 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15885 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15886 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15887 
   15888 	return 0;
   15889 }
   15890 
   15891 static int
   15892 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15893 {
   15894 	int rv;
   15895 	uint16_t reg;
   15896 
   15897 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15898 	if (rv != 0)
   15899 		return rv;
   15900 
   15901 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15902 	    reg | HV_KMRN_MDIO_SLOW);
   15903 }
   15904 
   15905 /*
   15906  *  wm_configure_k1_ich8lan - Configure K1 power state
   15907  *  @sc: pointer to the HW structure
 *  @k1_enable: K1 state to configure
   15909  *
   15910  *  Configure the K1 power state based on the provided parameter.
   15911  *  Assumes semaphore already acquired.
   15912  */
   15913 static void
   15914 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15915 {
   15916 	uint32_t ctrl, ctrl_ext, tmp;
   15917 	uint16_t kmreg;
   15918 	int rv;
   15919 
   15920 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15921 
   15922 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15923 	if (rv != 0)
   15924 		return;
   15925 
   15926 	if (k1_enable)
   15927 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15928 	else
   15929 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15930 
   15931 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15932 	if (rv != 0)
   15933 		return;
   15934 
   15935 	delay(20);
   15936 
   15937 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15938 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15939 
   15940 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15941 	tmp |= CTRL_FRCSPD;
   15942 
   15943 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15944 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15945 	CSR_WRITE_FLUSH(sc);
   15946 	delay(20);
   15947 
   15948 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15949 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15950 	CSR_WRITE_FLUSH(sc);
   15951 	delay(20);
   15952 
   15953 	return;
   15954 }
   15955 
   15956 /* special case - for 82575 - need to do manual init ... */
   15957 static void
   15958 wm_reset_init_script_82575(struct wm_softc *sc)
   15959 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
   15964 
   15965 	/* SerDes configuration via SERDESCTRL */
   15966 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15967 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15968 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15969 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15970 
   15971 	/* CCM configuration via CCMCTL register */
   15972 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15973 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15974 
   15975 	/* PCIe lanes configuration */
   15976 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15977 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15978 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15979 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15980 
   15981 	/* PCIe PLL Configuration */
   15982 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15983 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15984 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15985 }
   15986 
   15987 static void
   15988 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15989 {
   15990 	uint32_t reg;
   15991 	uint16_t nvmword;
   15992 	int rv;
   15993 
   15994 	if (sc->sc_type != WM_T_82580)
   15995 		return;
   15996 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15997 		return;
   15998 
   15999 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16000 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16001 	if (rv != 0) {
   16002 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16003 		    __func__);
   16004 		return;
   16005 	}
   16006 
   16007 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16008 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16009 		reg |= MDICNFG_DEST;
   16010 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16011 		reg |= MDICNFG_COM_MDIO;
   16012 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16013 }
   16014 
   16015 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
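
/*
 * An absent or inaccessible PHY floats the MDIO bus, so ID register reads
 * return all ones (0xffff); all zeros likewise indicates a failed read.
 */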
   16016 
   16017 static bool
   16018 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16019 {
   16020 	uint32_t reg;
   16021 	uint16_t id1, id2;
   16022 	int i, rv;
   16023 
   16024 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16025 		device_xname(sc->sc_dev), __func__));
   16026 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16027 
   16028 	id1 = id2 = 0xffff;
   16029 	for (i = 0; i < 2; i++) {
   16030 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16031 		    &id1);
   16032 		if ((rv != 0) || MII_INVALIDID(id1))
   16033 			continue;
   16034 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16035 		    &id2);
   16036 		if ((rv != 0) || MII_INVALIDID(id2))
   16037 			continue;
   16038 		break;
   16039 	}
   16040 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16041 		goto out;
   16042 
   16043 	/*
   16044 	 * In case the PHY needs to be in mdio slow mode,
   16045 	 * set slow mode and try to get the PHY id again.
   16046 	 */
   16047 	rv = 0;
   16048 	if (sc->sc_type < WM_T_PCH_LPT) {
   16049 		sc->phy.release(sc);
   16050 		wm_set_mdio_slow_mode_hv(sc);
   16051 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16052 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16053 		sc->phy.acquire(sc);
   16054 	}
   16055 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   16056 		device_printf(sc->sc_dev, "XXX return with false\n");
   16057 		return false;
   16058 	}
   16059 out:
   16060 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16061 		/* Only unforce SMBus if ME is not active */
   16062 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16063 			uint16_t phyreg;
   16064 
   16065 			/* Unforce SMBus mode in PHY */
   16066 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16067 			    CV_SMB_CTRL, &phyreg);
   16068 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16069 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16070 			    CV_SMB_CTRL, phyreg);
   16071 
   16072 			/* Unforce SMBus mode in MAC */
   16073 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16074 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16075 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16076 		}
   16077 	}
   16078 	return true;
   16079 }
   16080 
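/*
 *  wm_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @sc: pointer to the HW structure
 *
 *  Toggling LANPHYPC power-cycles the PHY and forces the MAC/PHY
 *  interconnect back to PCIe mode (see wm_init_phy_workarounds_pchlan).
 */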
   16081 static void
   16082 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16083 {
   16084 	uint32_t reg;
   16085 	int i;
   16086 
   16087 	/* Set PHY Config Counter to 50msec */
   16088 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16089 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16090 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16091 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16092 
   16093 	/* Toggle LANPHYPC */
   16094 	reg = CSR_READ(sc, WMREG_CTRL);
   16095 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16096 	reg &= ~CTRL_LANPHYPC_VALUE;
   16097 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16098 	CSR_WRITE_FLUSH(sc);
   16099 	delay(1000);
   16100 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16101 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16102 	CSR_WRITE_FLUSH(sc);
   16103 
   16104 	if (sc->sc_type < WM_T_PCH_LPT)
   16105 		delay(50 * 1000);
   16106 	else {
   16107 		i = 20;
   16108 
   16109 		do {
   16110 			delay(5 * 1000);
   16111 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16112 		    && i--);
   16113 
   16114 		delay(30 * 1000);
   16115 	}
   16116 }
   16117 
   16118 static int
   16119 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16120 {
   16121 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16122 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16123 	uint32_t rxa;
   16124 	uint16_t scale = 0, lat_enc = 0;
   16125 	int32_t obff_hwm = 0;
   16126 	int64_t lat_ns, value;
   16127 
   16128 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16129 		device_xname(sc->sc_dev), __func__));
   16130 
   16131 	if (link) {
   16132 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16133 		uint32_t status;
   16134 		uint16_t speed;
   16135 		pcireg_t preg;
   16136 
   16137 		status = CSR_READ(sc, WMREG_STATUS);
   16138 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16139 		case STATUS_SPEED_10:
   16140 			speed = 10;
   16141 			break;
   16142 		case STATUS_SPEED_100:
   16143 			speed = 100;
   16144 			break;
   16145 		case STATUS_SPEED_1000:
   16146 			speed = 1000;
   16147 			break;
   16148 		default:
   16149 			device_printf(sc->sc_dev, "Unknown speed "
   16150 			    "(status = %08x)\n", status);
   16151 			return -1;
   16152 		}
   16153 
   16154 		/* Rx Packet Buffer Allocation size (KB) */
   16155 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16156 
   16157 		/*
   16158 		 * Determine the maximum latency tolerated by the device.
   16159 		 *
   16160 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16161 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16162 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16163 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16164 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16165 		 */
   16166 		lat_ns = ((int64_t)rxa * 1024 -
   16167 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16168 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16169 		if (lat_ns < 0)
   16170 			lat_ns = 0;
   16171 		else
   16172 			lat_ns /= speed;
   16173 		value = lat_ns;
   16174 
   16175 		while (value > LTRV_VALUE) {
			scale++;
   16177 			value = howmany(value, __BIT(5));
   16178 		}
   16179 		if (scale > LTRV_SCALE_MAX) {
   16180 			device_printf(sc->sc_dev,
   16181 			    "Invalid LTR latency scale %d\n", scale);
   16182 			return -1;
   16183 		}
   16184 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
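
		/*
		 * Worked example (illustrative): for lat_ns = 2,000,000 the
		 * loop above yields scale = 3 and value = 62, so lat_enc =
		 * (3 << 10) | 62.  Decoded, that is 62 * 2^(5*3) =
		 * 2,031,616 ns >= lat_ns, as required.
		 */
#if 0
		/*
		 * Not compiled - a sketch of decoding lat_enc back to
		 * nanoseconds, assuming LTRV_VALUE is the 10-bit value mask
		 * and LTRV_SCALE the 3-bit scale field used above.
		 */
		uint64_t decoded_ns = (uint64_t)(lat_enc & LTRV_VALUE)
		    << (5 * __SHIFTOUT(lat_enc, LTRV_SCALE));
#endif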
   16185 
   16186 		/* Determine the maximum latency tolerated by the platform */
   16187 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16188 		    WM_PCI_LTR_CAP_LPT);
   16189 		max_snoop = preg & 0xffff;
   16190 		max_nosnoop = preg >> 16;
   16191 
   16192 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16193 
   16194 		if (lat_enc > max_ltr_enc) {
   16195 			lat_enc = max_ltr_enc;
   16196 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16197 			    * PCI_LTR_SCALETONS(
   16198 				    __SHIFTOUT(lat_enc,
   16199 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16200 		}
   16201 
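		/*
		 * Turn the (possibly clamped) latency back into the amount
		 * of Rx buffer consumed while we wait; what remains of the
		 * Rx allocation becomes the OBFF high water mark.
		 */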
		if (lat_ns) {
			lat_ns *= speed * 1000;
			lat_ns /= 8;
			lat_ns /= 1000000000;
			obff_hwm = (int32_t)(rxa - lat_ns);
		}
		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
			    "(rxa = %d, lat_ns = %d)\n",
			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
			return -1;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	/* Set OBFF high water mark */
	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
	reg |= obff_hwm;
	CSR_WRITE(sc, WMREG_SVT, reg);

	/* Enable OBFF */
	reg = CSR_READ(sc, WMREG_SVCR);
	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
	CSR_WRITE(sc, WMREG_SVCR, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static int
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	uint16_t phyval;
	bool wa_done = false;
	int i, rv = 0;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
	    &pmreg, NULL) == 0)
		return -1;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

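	/*
	 * Clear the destination bit so MDI accesses go to the internal
	 * PHY while the workaround runs; the saved MDICNFG is restored
	 * below.
	 */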
	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

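	/*
	 * Retry up to WM_MAX_PLL_TRIES times: while the PHY PLL is still
	 * reported as unconfigured, reset the PHY, bounce the device
	 * through D3hot with the patched autoload word, and check again.
	 */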
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			rv = 0;
			break; /* OK */
		} else
			rv = -1;

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

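		/*
		 * Bounce the device through D3hot and back to D0 to
		 * force a PLL restart with the patched autoload word.
		 */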
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
	return rv;
}

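/*
 * wm_legacy_irq_quirk_spt:
 *
 *	Workaround applied when the device uses legacy (INTx) interrupts
 *	on PCH_SPT/PCH_CNP: keep the side clock ungated and disable IOSF
 *	sideband clock gating and clock requests.
 */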
static void
wm_legacy_irq_quirk_spt(struct wm_softc *sc)
{
	uint32_t reg;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT((sc->sc_type == WM_T_PCH_SPT)
	    || (sc->sc_type == WM_T_PCH_CNP));

	reg = CSR_READ(sc, WMREG_FEXTNVM7);
	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);

	reg = CSR_READ(sc, WMREG_FEXTNVM9);
	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
}