/*	$NetBSD: if_wm.c,v 1.667 2020/02/18 03:48:22 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet) for I354
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.667 2020/02/18 03:48:22 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>
#include <sys/sysctl.h>
#include <sys/workqueue.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */
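
/*
 * Illustrative use of DPRINTF() (hypothetical message, not quoted from
 * the code below): the extra parentheses pass a whole printf() argument
 * list as the second macro argument, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK,
 *	    ("%s: link is up\n", device_xname(sc->sc_dev)));
 */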

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU | WQ_MPSAFE
#else
#define CALLOUT_FLAGS	0
#define WM_WORKQUEUE_FLAGS	WQ_PERCPU
#endif

#define WM_WORKQUEUE_PRI PRI_SOFTNET

/*
 * The maximum number of interrupts this driver uses: one per TX/RX
 * queue pair plus one for the link interrupt.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
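
/*
 * Illustrative note: txq_ndesc and txq_num must be powers of two so
 * that the "& MASK" in WM_NEXTTX()/WM_NEXTTXS() implements ring
 * wrap-around without a modulo; e.g. with 4096 descriptors (mask
 * 0xfff), the successor of index 4095 is 0.
 */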

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
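
/*
 * Illustrative arithmetic: IP_MAXPACKET is 65535, so on a machine with
 * 4 KiB pages WM_MAXTXDMA is 2 * 64 KiB = 128 KiB of DMA map space per
 * TSO packet.
 */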

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256U
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
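
/*
 * Illustrative arithmetic for the comment above: a roughly 9k jumbo
 * frame spread over 2048-byte (MCLBYTES) clusters needs about 5
 * buffers, and 256 descriptors / 5 buffers per packet is roughly 50
 * jumbo packets in flight.
 */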

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;
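
/*
 * Descriptive note: these unions let one DMA allocation back whichever
 * descriptor layout the chip generation uses; the per-queue
 * txq_descsize/rxq_descsize fields select the stride, as used by
 * WM_CDTXOFF()/WM_CDRXOFF() below.
 */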

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
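
/*
 * Illustrative expansion: WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (token pasting does not occur inside the string literal, which merely
 * sizes the buffer), and WM_Q_EVCNT_ATTACH() then formats the counter
 * name as, e.g., "txq00txdw" for queue 0.
 */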

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags, to
	 * manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)	    /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	bool wmq_txrx_use_workqueue;
	struct work wmq_cookie;
	void *wmq_si;
	krndsource_t rnd_source;	/* random source */
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
	bool no_errprint;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
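
/*
 * Illustrative calling convention for the op vectors above (a sketch,
 * not quoted from the code below): callers bracket the locked accessors
 * with acquire/release, e.g.
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		rv = sc->phy.readreg_locked(dev, phy, reg, &val);
 *		sc->phy.release(sc);
 *	}
 */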

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint8_t sc_sfptype;		/* SFP type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	u_short sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx proc. repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx proc. repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx proc. repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx proc. repeat limit in H/W intr */
	struct workqueue *sc_queue_wq;
	bool sc_txrx_use_workqueue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	struct sysctllog *sc_sysctllog;

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)						\
	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)						\
	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
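
/*
 * Descriptive note: rxq_tailp always points at the slot where the next
 * mbuf should be stored (rxq_head for an empty chain, otherwise the
 * last mbuf's m_next), so WM_RXCHAIN_LINK() appends in O(1) without
 * walking the chain.
 */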

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void)CSR_READ((sc), WMREG_STATUS)
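
/*
 * Illustrative pattern (a sketch, not quoted from the code below):
 * PCI(e) writes are posted, so the dummy STATUS read in
 * CSR_WRITE_FLUSH() pushes preceding writes out to the chip, e.g.
 *
 *	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
 *	CSR_WRITE_FLUSH(sc);
 */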

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
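
/*
 * Illustrative arithmetic (hypothetical address): descriptor rings
 * store DMA addresses as two 32-bit halves, so for a descriptor at
 * 0x123456000 the _LO macro yields 0x23456000 and _HI yields 0x1; with
 * a 32-bit bus_addr_t the high half is always 0.
 */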

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_init_sysctls(struct wm_softc *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
static void	wm_handle_queue_work(struct work *, void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1326 	  "82801I mobile (AMT) LAN Controller",
   1327 	  WM_T_ICH9,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1329 	  "82567LM-4 LAN Controller",
   1330 	  WM_T_ICH9,		WMP_F_COPPER },
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1332 	  "82567LM-2 LAN Controller",
   1333 	  WM_T_ICH10,		WMP_F_COPPER },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1335 	  "82567LF-2 LAN Controller",
   1336 	  WM_T_ICH10,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1338 	  "82567LM-3 LAN Controller",
   1339 	  WM_T_ICH10,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1341 	  "82567LF-3 LAN Controller",
   1342 	  WM_T_ICH10,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1344 	  "82567V-2 LAN Controller",
   1345 	  WM_T_ICH10,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1347 	  "82567V-3? LAN Controller",
   1348 	  WM_T_ICH10,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1350 	  "HANKSVILLE LAN Controller",
   1351 	  WM_T_ICH10,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1353 	  "PCH LAN (82577LM) Controller",
   1354 	  WM_T_PCH,		WMP_F_COPPER },
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1356 	  "PCH LAN (82577LC) Controller",
   1357 	  WM_T_PCH,		WMP_F_COPPER },
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1359 	  "PCH LAN (82578DM) Controller",
   1360 	  WM_T_PCH,		WMP_F_COPPER },
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1362 	  "PCH LAN (82578DC) Controller",
   1363 	  WM_T_PCH,		WMP_F_COPPER },
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1365 	  "PCH2 LAN (82579LM) Controller",
   1366 	  WM_T_PCH2,		WMP_F_COPPER },
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1368 	  "PCH2 LAN (82579V) Controller",
   1369 	  WM_T_PCH2,		WMP_F_COPPER },
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1371 	  "82575EB dual-1000baseT Ethernet",
   1372 	  WM_T_82575,		WMP_F_COPPER },
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1374 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1375 	  WM_T_82575,		WMP_F_SERDES },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1377 	  "82575GB quad-1000baseT Ethernet",
   1378 	  WM_T_82575,		WMP_F_COPPER },
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1380 	  "82575GB quad-1000baseT Ethernet (PM)",
   1381 	  WM_T_82575,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1383 	  "82576 1000BaseT Ethernet",
   1384 	  WM_T_82576,		WMP_F_COPPER },
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1386 	  "82576 1000BaseX Ethernet",
   1387 	  WM_T_82576,		WMP_F_FIBER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1390 	  "82576 gigabit Ethernet (SERDES)",
   1391 	  WM_T_82576,		WMP_F_SERDES },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1394 	  "82576 quad-1000BaseT Ethernet",
   1395 	  WM_T_82576,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1398 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1399 	  WM_T_82576,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1402 	  "82576 gigabit Ethernet",
   1403 	  WM_T_82576,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1406 	  "82576 gigabit Ethernet (SERDES)",
   1407 	  WM_T_82576,		WMP_F_SERDES },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1409 	  "82576 quad-gigabit Ethernet (SERDES)",
   1410 	  WM_T_82576,		WMP_F_SERDES },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1413 	  "82580 1000BaseT Ethernet",
   1414 	  WM_T_82580,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1416 	  "82580 1000BaseX Ethernet",
   1417 	  WM_T_82580,		WMP_F_FIBER },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1420 	  "82580 1000BaseT Ethernet (SERDES)",
   1421 	  WM_T_82580,		WMP_F_SERDES },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1424 	  "82580 gigabit Ethernet (SGMII)",
   1425 	  WM_T_82580,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1427 	  "82580 dual-1000BaseT Ethernet",
   1428 	  WM_T_82580,		WMP_F_COPPER },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1431 	  "82580 quad-1000BaseX Ethernet",
   1432 	  WM_T_82580,		WMP_F_FIBER },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1435 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1436 	  WM_T_82580,		WMP_F_COPPER },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1439 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1440 	  WM_T_82580,		WMP_F_SERDES },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1443 	  "DH89XXCC 1000BASE-KX Ethernet",
   1444 	  WM_T_82580,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1447 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1448 	  WM_T_82580,		WMP_F_SERDES },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1451 	  "I350 Gigabit Network Connection",
   1452 	  WM_T_I350,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1455 	  "I350 Gigabit Fiber Network Connection",
   1456 	  WM_T_I350,		WMP_F_FIBER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1459 	  "I350 Gigabit Backplane Connection",
   1460 	  WM_T_I350,		WMP_F_SERDES },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1463 	  "I350 Quad Port Gigabit Ethernet",
   1464 	  WM_T_I350,		WMP_F_SERDES },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1467 	  "I350 Gigabit Connection",
   1468 	  WM_T_I350,		WMP_F_COPPER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1471 	  "I354 Gigabit Ethernet (KX)",
   1472 	  WM_T_I354,		WMP_F_SERDES },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1475 	  "I354 Gigabit Ethernet (SGMII)",
   1476 	  WM_T_I354,		WMP_F_COPPER },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1479 	  "I354 Gigabit Ethernet (2.5G)",
   1480 	  WM_T_I354,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1483 	  "I210-T1 Ethernet Server Adapter",
   1484 	  WM_T_I210,		WMP_F_COPPER },
   1485 
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1487 	  "I210 Ethernet (Copper OEM)",
   1488 	  WM_T_I210,		WMP_F_COPPER },
   1489 
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1491 	  "I210 Ethernet (Copper IT)",
   1492 	  WM_T_I210,		WMP_F_COPPER },
   1493 
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1495 	  "I210 Ethernet (Copper, FLASH less)",
   1496 	  WM_T_I210,		WMP_F_COPPER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1499 	  "I210 Gigabit Ethernet (Fiber)",
   1500 	  WM_T_I210,		WMP_F_FIBER },
   1501 
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1503 	  "I210 Gigabit Ethernet (SERDES)",
   1504 	  WM_T_I210,		WMP_F_SERDES },
   1505 
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1507 	  "I210 Gigabit Ethernet (SERDES, FLASH less)",
   1508 	  WM_T_I210,		WMP_F_SERDES },
   1509 
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1511 	  "I210 Gigabit Ethernet (SGMII)",
   1512 	  WM_T_I210,		WMP_F_COPPER },
   1513 
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII_WOF,
   1515 	  "I210 Gigabit Ethernet (SGMII, FLASH less)",
   1516 	  WM_T_I210,		WMP_F_COPPER },
   1517 
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1519 	  "I211 Ethernet (COPPER)",
   1520 	  WM_T_I211,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1522 	  "I217 V Ethernet Connection",
   1523 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1525 	  "I217 LM Ethernet Connection",
   1526 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1528 	  "I218 V Ethernet Connection",
   1529 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1531 	  "I218 V Ethernet Connection",
   1532 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1534 	  "I218 V Ethernet Connection",
   1535 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1537 	  "I218 LM Ethernet Connection",
   1538 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1540 	  "I218 LM Ethernet Connection",
   1541 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1543 	  "I218 LM Ethernet Connection",
   1544 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1546 	  "I219 LM Ethernet Connection",
   1547 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1548 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1549 	  "I219 LM Ethernet Connection",
   1550 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1551 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1552 	  "I219 LM Ethernet Connection",
   1553 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1554 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1555 	  "I219 LM Ethernet Connection",
   1556 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1557 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1558 	  "I219 LM Ethernet Connection",
   1559 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1560 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1561 	  "I219 LM Ethernet Connection",
   1562 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1563 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1564 	  "I219 LM Ethernet Connection",
   1565 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1566 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM8,
   1567 	  "I219 LM Ethernet Connection",
   1568 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1569 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM9,
   1570 	  "I219 LM Ethernet Connection",
   1571 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1572 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM10,
   1573 	  "I219 LM Ethernet Connection",
   1574 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1575 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM11,
   1576 	  "I219 LM Ethernet Connection",
   1577 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1578 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM12,
   1579 	  "I219 LM Ethernet Connection",
   1580 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1581 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM13,
   1582 	  "I219 LM Ethernet Connection",
   1583 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1584 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM14,
   1585 	  "I219 LM Ethernet Connection",
   1586 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1587 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM15,
   1588 	  "I219 LM Ethernet Connection",
   1589 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1590 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1591 	  "I219 V Ethernet Connection",
   1592 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1593 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1594 	  "I219 V Ethernet Connection",
   1595 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1596 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1597 	  "I219 V Ethernet Connection",
   1598 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1599 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1600 	  "I219 V Ethernet Connection",
   1601 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1602 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1603 	  "I219 V Ethernet Connection",
   1604 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1605 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1606 	  "I219 V Ethernet Connection",
   1607 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1608 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V8,
   1609 	  "I219 V Ethernet Connection",
   1610 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1611 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V9,
   1612 	  "I219 V Ethernet Connection",
   1613 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1614 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V10,
   1615 	  "I219 V Ethernet Connection",
   1616 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1617 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V11,
   1618 	  "I219 V Ethernet Connection",
   1619 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1620 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V12,
   1621 	  "I219 V Ethernet Connection",
   1622 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1623 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V13,
   1624 	  "I219 V Ethernet Connection",
   1625 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1626 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V14,
   1627 	  "I219 V Ethernet Connection",
   1628 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1629 	{ 0,			0,
   1630 	  NULL,
   1631 	  0,			0 },
   1632 };
   1633 
   1634 /*
   1635  * Register read/write functions.
   1636  * Other than CSR_{READ|WRITE}().
   1637  */
   1638 
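/*
 * wm_io_read()/wm_io_write() use the indirect I/O window exposed
 * through the chip's I/O BAR: the target register offset is first
 * written at I/O offset 0 and the data is then read or written at
 * I/O offset 4 (the IOADDR/IODATA register pair in Intel's
 * documentation).
 */
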
   1639 #if 0 /* Not currently used */
   1640 static inline uint32_t
   1641 wm_io_read(struct wm_softc *sc, int reg)
   1642 {
   1643 
   1644 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1645 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1646 }
   1647 #endif
   1648 
   1649 static inline void
   1650 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1651 {
   1652 
   1653 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1654 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1655 }
   1656 
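/*
 * Write an 8-bit value into a sub-register behind an 82575-style
 * control register (e.g. SCTL): the byte offset and data are merged
 * into one register write, and the READY bit is then polled until the
 * hardware indicates it has latched the value.
 */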
   1657 static inline void
   1658 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1659     uint32_t data)
   1660 {
   1661 	uint32_t regval;
   1662 	int i;
   1663 
   1664 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1665 
   1666 	CSR_WRITE(sc, reg, regval);
   1667 
   1668 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1669 		delay(5);
   1670 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1671 			break;
   1672 	}
   1673 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1674 		aprint_error("%s: WARNING:"
   1675 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1676 		    device_xname(sc->sc_dev), reg);
   1677 	}
   1678 }
   1679 
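/*
 * Store a DMA address in a wiseman-style descriptor address field as
 * two little-endian 32-bit halves; the high half is non-zero only
 * when bus addresses are 64 bits wide.
 */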
   1680 static inline void
   1681 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1682 {
   1683 	wa->wa_low = htole32(v & 0xffffffffU);
   1684 	if (sizeof(bus_addr_t) == 8)
   1685 		wa->wa_high = htole32((uint64_t) v >> 32);
   1686 	else
   1687 		wa->wa_high = 0;
   1688 }
   1689 
   1690 /*
   1691  * Descriptor sync/init functions.
   1692  */
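
/*
 * wm_cdtxsync() syncs "num" descriptors starting at "start" and splits
 * the bus_dmamap_sync() in two when the range wraps past the end of
 * the ring.  For example, on a 256-descriptor ring, start = 250 and
 * num = 10 sync descriptors 250-255 and then 0-3.
 */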
   1693 static inline void
   1694 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1695 {
   1696 	struct wm_softc *sc = txq->txq_sc;
   1697 
   1698 	/* If it will wrap around, sync to the end of the ring. */
   1699 	if ((start + num) > WM_NTXDESC(txq)) {
   1700 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1701 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1702 		    (WM_NTXDESC(txq) - start), ops);
   1703 		num -= (WM_NTXDESC(txq) - start);
   1704 		start = 0;
   1705 	}
   1706 
   1707 	/* Now sync whatever is left. */
   1708 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1709 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1710 }
   1711 
   1712 static inline void
   1713 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1714 {
   1715 	struct wm_softc *sc = rxq->rxq_sc;
   1716 
   1717 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1718 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1719 }
   1720 
   1721 static inline void
   1722 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1723 {
   1724 	struct wm_softc *sc = rxq->rxq_sc;
   1725 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1726 	struct mbuf *m = rxs->rxs_mbuf;
   1727 
   1728 	/*
   1729 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1730 	 * so that the payload after the Ethernet header is aligned
   1731 	 * to a 4-byte boundary.
	 *
   1733 	 * XXX BRAINDAMAGE ALERT!
   1734 	 * The stupid chip uses the same size for every buffer, which
   1735 	 * is set in the Receive Control register.  We are using the 2K
   1736 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1737 	 * reason, we can't "scoot" packets longer than the standard
   1738 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1739 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1740 	 * the upper layer copy the headers.
   1741 	 */
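	/*
	 * Example: a standard 1518-byte frame plus the 2-byte scoot is
	 * 1520 bytes, which still fits in the 2K buffer, so
	 * sc_align_tweak can stay 2; frames that would exceed (2K - 2)
	 * get sc_align_tweak 0, as described above.
	 */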
   1742 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1743 
   1744 	if (sc->sc_type == WM_T_82574) {
   1745 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1746 		rxd->erx_data.erxd_addr =
   1747 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1748 		rxd->erx_data.erxd_dd = 0;
   1749 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1750 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1751 
   1752 		rxd->nqrx_data.nrxd_paddr =
   1753 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1754 		/* Currently, split header is not supported. */
   1755 		rxd->nqrx_data.nrxd_haddr = 0;
   1756 	} else {
   1757 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1758 
   1759 		wm_set_dma_addr(&rxd->wrx_addr,
   1760 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1761 		rxd->wrx_len = 0;
   1762 		rxd->wrx_cksum = 0;
   1763 		rxd->wrx_status = 0;
   1764 		rxd->wrx_errors = 0;
   1765 		rxd->wrx_special = 0;
   1766 	}
   1767 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1768 
   1769 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1770 }
   1771 
   1772 /*
   1773  * Device driver interface functions and commonly used functions.
   1774  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1775  */
   1776 
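/*
 * The wm_products[] table above is terminated by an all-zero sentinel
 * entry (wmp_name == NULL); wm_lookup() scans it for the first entry
 * whose PCI vendor/product pair matches the device being probed.
 */
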
   1777 /* Lookup supported device table */
   1778 static const struct wm_product *
   1779 wm_lookup(const struct pci_attach_args *pa)
   1780 {
   1781 	const struct wm_product *wmp;
   1782 
   1783 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1784 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1785 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1786 			return wmp;
   1787 	}
   1788 	return NULL;
   1789 }
   1790 
   1791 /* The match function (ca_match) */
   1792 static int
   1793 wm_match(device_t parent, cfdata_t cf, void *aux)
   1794 {
   1795 	struct pci_attach_args *pa = aux;
   1796 
   1797 	if (wm_lookup(pa) != NULL)
   1798 		return 1;
   1799 
   1800 	return 0;
   1801 }
   1802 
   1803 /* The attach function (ca_attach) */
   1804 static void
   1805 wm_attach(device_t parent, device_t self, void *aux)
   1806 {
   1807 	struct wm_softc *sc = device_private(self);
   1808 	struct pci_attach_args *pa = aux;
   1809 	prop_dictionary_t dict;
   1810 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1811 	pci_chipset_tag_t pc = pa->pa_pc;
   1812 	int counts[PCI_INTR_TYPE_SIZE];
   1813 	pci_intr_type_t max_type;
   1814 	const char *eetype, *xname;
   1815 	bus_space_tag_t memt;
   1816 	bus_space_handle_t memh;
   1817 	bus_size_t memsize;
   1818 	int memh_valid;
   1819 	int i, error;
   1820 	const struct wm_product *wmp;
   1821 	prop_data_t ea;
   1822 	prop_number_t pn;
   1823 	uint8_t enaddr[ETHER_ADDR_LEN];
   1824 	char buf[256];
   1825 	char wqname[MAXCOMLEN];
   1826 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1827 	pcireg_t preg, memtype;
   1828 	uint16_t eeprom_data, apme_mask;
   1829 	bool force_clear_smbi;
   1830 	uint32_t link_mode;
   1831 	uint32_t reg;
   1832 
   1833 	sc->sc_dev = self;
   1834 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1835 	sc->sc_core_stopping = false;
   1836 
   1837 	wmp = wm_lookup(pa);
   1838 #ifdef DIAGNOSTIC
   1839 	if (wmp == NULL) {
   1840 		printf("\n");
   1841 		panic("wm_attach: impossible");
   1842 	}
   1843 #endif
   1844 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1845 
   1846 	sc->sc_pc = pa->pa_pc;
   1847 	sc->sc_pcitag = pa->pa_tag;
   1848 
   1849 	if (pci_dma64_available(pa))
   1850 		sc->sc_dmat = pa->pa_dmat64;
   1851 	else
   1852 		sc->sc_dmat = pa->pa_dmat;
   1853 
   1854 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1855 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1856 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1857 
   1858 	sc->sc_type = wmp->wmp_type;
   1859 
   1860 	/* Set default function pointers */
   1861 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1862 	sc->phy.release = sc->nvm.release = wm_put_null;
   1863 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1864 
   1865 	if (sc->sc_type < WM_T_82543) {
   1866 		if (sc->sc_rev < 2) {
   1867 			aprint_error_dev(sc->sc_dev,
   1868 			    "i82542 must be at least rev. 2\n");
   1869 			return;
   1870 		}
   1871 		if (sc->sc_rev < 3)
   1872 			sc->sc_type = WM_T_82542_2_0;
   1873 	}
   1874 
   1875 	/*
   1876 	 * Disable MSI for Errata:
   1877 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1878 	 *
   1879 	 *  82544: Errata 25
   1880 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1881 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1882 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1883 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1884 	 *
   1885 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1886 	 *
   1887 	 *  82571 & 82572: Errata 63
   1888 	 */
   1889 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1890 	    || (sc->sc_type == WM_T_82572))
   1891 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1892 
   1893 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1894 	    || (sc->sc_type == WM_T_82580)
   1895 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1896 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1897 		sc->sc_flags |= WM_F_NEWQUEUE;
   1898 
   1899 	/* Set device properties (mactype) */
   1900 	dict = device_properties(sc->sc_dev);
   1901 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1902 
   1903 	/*
	 * Map the device.  All devices support memory-mapped access,
   1905 	 * and it is really required for normal operation.
   1906 	 */
   1907 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1908 	switch (memtype) {
   1909 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1910 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1911 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1912 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1913 		break;
   1914 	default:
   1915 		memh_valid = 0;
   1916 		break;
   1917 	}
   1918 
   1919 	if (memh_valid) {
   1920 		sc->sc_st = memt;
   1921 		sc->sc_sh = memh;
   1922 		sc->sc_ss = memsize;
   1923 	} else {
   1924 		aprint_error_dev(sc->sc_dev,
   1925 		    "unable to map device registers\n");
   1926 		return;
   1927 	}
   1928 
   1929 	/*
   1930 	 * In addition, i82544 and later support I/O mapped indirect
   1931 	 * register access.  It is not desirable (nor supported in
   1932 	 * this driver) to use it for normal operation, though it is
   1933 	 * required to work around bugs in some chip versions.
   1934 	 */
   1935 	if (sc->sc_type >= WM_T_82544) {
   1936 		/* First we have to find the I/O BAR. */
   1937 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1938 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1939 			if (memtype == PCI_MAPREG_TYPE_IO)
   1940 				break;
   1941 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1942 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1943 				i += 4;	/* skip high bits, too */
   1944 		}
   1945 		if (i < PCI_MAPREG_END) {
   1946 			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because newer chips don't
			 * have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
   1955 			 */
   1956 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1957 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "WARNING: I/O BAR at zero.\n");
   1960 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1961 					0, &sc->sc_iot, &sc->sc_ioh,
   1962 					NULL, &sc->sc_ios) == 0) {
   1963 				sc->sc_flags |= WM_F_IOH_VALID;
   1964 			} else
   1965 				aprint_error_dev(sc->sc_dev,
   1966 				    "WARNING: unable to map I/O space\n");
   1967 		}
   1968 
   1969 	}
   1970 
   1971 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1972 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1973 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1974 	if (sc->sc_type < WM_T_82542_2_1)
   1975 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1976 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1977 
   1978 	/* Power up chip */
   1979 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1980 	    && error != EOPNOTSUPP) {
   1981 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1982 		return;
   1983 	}
   1984 
   1985 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1986 	/*
	 * Don't use MSI-X if we can use only one queue, to save interrupt
	 * resources.
   1989 	 */
   1990 	if (sc->sc_nqueues > 1) {
   1991 		max_type = PCI_INTR_TYPE_MSIX;
   1992 		/*
		 * The 82583 has an MSI-X capability in its PCI configuration
		 * space, but it doesn't actually support MSI-X; at least the
		 * documentation doesn't say anything about it.
   1996 		 */
   1997 		counts[PCI_INTR_TYPE_MSIX]
   1998 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1999 	} else {
   2000 		max_type = PCI_INTR_TYPE_MSI;
   2001 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2002 	}
   2003 
   2004 	/* Allocation settings */
   2005 	counts[PCI_INTR_TYPE_MSI] = 1;
   2006 	counts[PCI_INTR_TYPE_INTX] = 1;
   2007 	/* overridden by disable flags */
   2008 	if (wm_disable_msi != 0) {
   2009 		counts[PCI_INTR_TYPE_MSI] = 0;
   2010 		if (wm_disable_msix != 0) {
   2011 			max_type = PCI_INTR_TYPE_INTX;
   2012 			counts[PCI_INTR_TYPE_MSIX] = 0;
   2013 		}
   2014 	} else if (wm_disable_msix != 0) {
   2015 		max_type = PCI_INTR_TYPE_MSI;
   2016 		counts[PCI_INTR_TYPE_MSIX] = 0;
   2017 	}
   2018 
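	/*
	 * Interrupt allocation and setup fall back in steps: MSI-X
	 * first, then MSI, then INTx; we give up only when INTx also
	 * fails.
	 */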
   2019 alloc_retry:
   2020 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   2021 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   2022 		return;
   2023 	}
   2024 
   2025 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   2026 		error = wm_setup_msix(sc);
   2027 		if (error) {
   2028 			pci_intr_release(pc, sc->sc_intrs,
   2029 			    counts[PCI_INTR_TYPE_MSIX]);
   2030 
   2031 			/* Setup for MSI: Disable MSI-X */
   2032 			max_type = PCI_INTR_TYPE_MSI;
   2033 			counts[PCI_INTR_TYPE_MSI] = 1;
   2034 			counts[PCI_INTR_TYPE_INTX] = 1;
   2035 			goto alloc_retry;
   2036 		}
   2037 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   2038 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2039 		error = wm_setup_legacy(sc);
   2040 		if (error) {
   2041 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2042 			    counts[PCI_INTR_TYPE_MSI]);
   2043 
   2044 			/* The next try is for INTx: Disable MSI */
   2045 			max_type = PCI_INTR_TYPE_INTX;
   2046 			counts[PCI_INTR_TYPE_INTX] = 1;
   2047 			goto alloc_retry;
   2048 		}
   2049 	} else {
   2050 		wm_adjust_qnum(sc, 0);	/* Must not use multiqueue */
   2051 		error = wm_setup_legacy(sc);
   2052 		if (error) {
   2053 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   2054 			    counts[PCI_INTR_TYPE_INTX]);
   2055 			return;
   2056 		}
   2057 	}
   2058 
   2059 	snprintf(wqname, sizeof(wqname), "%sTxRx", device_xname(sc->sc_dev));
   2060 	error = workqueue_create(&sc->sc_queue_wq, wqname,
   2061 	    wm_handle_queue_work, sc, WM_WORKQUEUE_PRI, IPL_NET,
   2062 	    WM_WORKQUEUE_FLAGS);
   2063 	if (error) {
   2064 		aprint_error_dev(sc->sc_dev,
   2065 		    "unable to create workqueue\n");
   2066 		goto out;
   2067 	}
   2068 
   2069 	/*
   2070 	 * Check the function ID (unit number of the chip).
   2071 	 */
   2072 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   2073 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   2074 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2075 	    || (sc->sc_type == WM_T_82580)
   2076 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2077 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2078 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2079 	else
   2080 		sc->sc_funcid = 0;
   2081 
   2082 	/*
   2083 	 * Determine a few things about the bus we're connected to.
   2084 	 */
   2085 	if (sc->sc_type < WM_T_82543) {
   2086 		/* We don't really know the bus characteristics here. */
   2087 		sc->sc_bus_speed = 33;
   2088 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2089 		/*
		 * CSA (Communication Streaming Architecture) is about as fast
		 * as a 32-bit 66MHz PCI bus.
   2092 		 */
   2093 		sc->sc_flags |= WM_F_CSA;
   2094 		sc->sc_bus_speed = 66;
   2095 		aprint_verbose_dev(sc->sc_dev,
   2096 		    "Communication Streaming Architecture\n");
   2097 		if (sc->sc_type == WM_T_82547) {
   2098 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2099 			callout_setfunc(&sc->sc_txfifo_ch,
   2100 			    wm_82547_txfifo_stall, sc);
   2101 			aprint_verbose_dev(sc->sc_dev,
   2102 			    "using 82547 Tx FIFO stall work-around\n");
   2103 		}
   2104 	} else if (sc->sc_type >= WM_T_82571) {
   2105 		sc->sc_flags |= WM_F_PCIE;
   2106 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2107 		    && (sc->sc_type != WM_T_ICH10)
   2108 		    && (sc->sc_type != WM_T_PCH)
   2109 		    && (sc->sc_type != WM_T_PCH2)
   2110 		    && (sc->sc_type != WM_T_PCH_LPT)
   2111 		    && (sc->sc_type != WM_T_PCH_SPT)
   2112 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2113 			/* ICH* and PCH* have no PCIe capability registers */
   2114 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2115 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2116 				NULL) == 0)
   2117 				aprint_error_dev(sc->sc_dev,
   2118 				    "unable to find PCIe capability\n");
   2119 		}
   2120 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2121 	} else {
   2122 		reg = CSR_READ(sc, WMREG_STATUS);
   2123 		if (reg & STATUS_BUS64)
   2124 			sc->sc_flags |= WM_F_BUS64;
   2125 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2126 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2127 
   2128 			sc->sc_flags |= WM_F_PCIX;
   2129 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2130 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2131 				aprint_error_dev(sc->sc_dev,
   2132 				    "unable to find PCIX capability\n");
   2133 			else if (sc->sc_type != WM_T_82545_3 &&
   2134 				 sc->sc_type != WM_T_82546_3) {
   2135 				/*
   2136 				 * Work around a problem caused by the BIOS
   2137 				 * setting the max memory read byte count
   2138 				 * incorrectly.
   2139 				 */
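				/*
				 * Both fields encode the byte count as a
				 * power of two, 512 << n; clamp the
				 * commanded count (MMRBC) to the maximum
				 * the device advertises.
				 */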
   2140 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2141 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2142 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2143 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2144 
   2145 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2146 				    PCIX_CMD_BYTECNT_SHIFT;
   2147 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2148 				    PCIX_STATUS_MAXB_SHIFT;
   2149 				if (bytecnt > maxb) {
   2150 					aprint_verbose_dev(sc->sc_dev,
   2151 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2152 					    512 << bytecnt, 512 << maxb);
   2153 					pcix_cmd = (pcix_cmd &
   2154 					    ~PCIX_CMD_BYTECNT_MASK) |
   2155 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2156 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2157 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2158 					    pcix_cmd);
   2159 				}
   2160 			}
   2161 		}
   2162 		/*
   2163 		 * The quad port adapter is special; it has a PCIX-PCIX
   2164 		 * bridge on the board, and can run the secondary bus at
   2165 		 * a higher speed.
   2166 		 */
   2167 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2168 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2169 								      : 66;
   2170 		} else if (sc->sc_flags & WM_F_PCIX) {
   2171 			switch (reg & STATUS_PCIXSPD_MASK) {
   2172 			case STATUS_PCIXSPD_50_66:
   2173 				sc->sc_bus_speed = 66;
   2174 				break;
   2175 			case STATUS_PCIXSPD_66_100:
   2176 				sc->sc_bus_speed = 100;
   2177 				break;
   2178 			case STATUS_PCIXSPD_100_133:
   2179 				sc->sc_bus_speed = 133;
   2180 				break;
   2181 			default:
   2182 				aprint_error_dev(sc->sc_dev,
   2183 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2184 				    reg & STATUS_PCIXSPD_MASK);
   2185 				sc->sc_bus_speed = 66;
   2186 				break;
   2187 			}
   2188 		} else
   2189 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2190 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2191 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2192 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2193 	}
   2194 
   2195 	/* clear interesting stat counters */
   2196 	CSR_READ(sc, WMREG_COLC);
   2197 	CSR_READ(sc, WMREG_RXERRC);
   2198 
   2199 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2200 	    || (sc->sc_type >= WM_T_ICH8))
   2201 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2202 	if (sc->sc_type >= WM_T_ICH8)
   2203 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2204 
   2205 	/* Set PHY, NVM mutex related stuff */
   2206 	switch (sc->sc_type) {
   2207 	case WM_T_82542_2_0:
   2208 	case WM_T_82542_2_1:
   2209 	case WM_T_82543:
   2210 	case WM_T_82544:
   2211 		/* Microwire */
   2212 		sc->nvm.read = wm_nvm_read_uwire;
   2213 		sc->sc_nvm_wordsize = 64;
   2214 		sc->sc_nvm_addrbits = 6;
   2215 		break;
   2216 	case WM_T_82540:
   2217 	case WM_T_82545:
   2218 	case WM_T_82545_3:
   2219 	case WM_T_82546:
   2220 	case WM_T_82546_3:
   2221 		/* Microwire */
   2222 		sc->nvm.read = wm_nvm_read_uwire;
   2223 		reg = CSR_READ(sc, WMREG_EECD);
   2224 		if (reg & EECD_EE_SIZE) {
   2225 			sc->sc_nvm_wordsize = 256;
   2226 			sc->sc_nvm_addrbits = 8;
   2227 		} else {
   2228 			sc->sc_nvm_wordsize = 64;
   2229 			sc->sc_nvm_addrbits = 6;
   2230 		}
   2231 		sc->sc_flags |= WM_F_LOCK_EECD;
   2232 		sc->nvm.acquire = wm_get_eecd;
   2233 		sc->nvm.release = wm_put_eecd;
   2234 		break;
   2235 	case WM_T_82541:
   2236 	case WM_T_82541_2:
   2237 	case WM_T_82547:
   2238 	case WM_T_82547_2:
   2239 		reg = CSR_READ(sc, WMREG_EECD);
   2240 		/*
		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
		 * 8254[17], so set the flags and functions before calling it.
   2243 		 */
   2244 		sc->sc_flags |= WM_F_LOCK_EECD;
   2245 		sc->nvm.acquire = wm_get_eecd;
   2246 		sc->nvm.release = wm_put_eecd;
   2247 		if (reg & EECD_EE_TYPE) {
   2248 			/* SPI */
   2249 			sc->nvm.read = wm_nvm_read_spi;
   2250 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2251 			wm_nvm_set_addrbits_size_eecd(sc);
   2252 		} else {
   2253 			/* Microwire */
   2254 			sc->nvm.read = wm_nvm_read_uwire;
   2255 			if ((reg & EECD_EE_ABITS) != 0) {
   2256 				sc->sc_nvm_wordsize = 256;
   2257 				sc->sc_nvm_addrbits = 8;
   2258 			} else {
   2259 				sc->sc_nvm_wordsize = 64;
   2260 				sc->sc_nvm_addrbits = 6;
   2261 			}
   2262 		}
   2263 		break;
   2264 	case WM_T_82571:
   2265 	case WM_T_82572:
   2266 		/* SPI */
   2267 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2269 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2270 		wm_nvm_set_addrbits_size_eecd(sc);
   2271 		sc->phy.acquire = wm_get_swsm_semaphore;
   2272 		sc->phy.release = wm_put_swsm_semaphore;
   2273 		sc->nvm.acquire = wm_get_nvm_82571;
   2274 		sc->nvm.release = wm_put_nvm_82571;
   2275 		break;
   2276 	case WM_T_82573:
   2277 	case WM_T_82574:
   2278 	case WM_T_82583:
   2279 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2281 		if (sc->sc_type == WM_T_82573) {
   2282 			sc->phy.acquire = wm_get_swsm_semaphore;
   2283 			sc->phy.release = wm_put_swsm_semaphore;
   2284 			sc->nvm.acquire = wm_get_nvm_82571;
   2285 			sc->nvm.release = wm_put_nvm_82571;
   2286 		} else {
   2287 			/* Both PHY and NVM use the same semaphore. */
   2288 			sc->phy.acquire = sc->nvm.acquire
   2289 			    = wm_get_swfwhw_semaphore;
   2290 			sc->phy.release = sc->nvm.release
   2291 			    = wm_put_swfwhw_semaphore;
   2292 		}
   2293 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2294 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2295 			sc->sc_nvm_wordsize = 2048;
   2296 		} else {
   2297 			/* SPI */
   2298 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2299 			wm_nvm_set_addrbits_size_eecd(sc);
   2300 		}
   2301 		break;
   2302 	case WM_T_82575:
   2303 	case WM_T_82576:
   2304 	case WM_T_82580:
   2305 	case WM_T_I350:
   2306 	case WM_T_I354:
   2307 	case WM_T_80003:
   2308 		/* SPI */
   2309 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2310 		wm_nvm_set_addrbits_size_eecd(sc);
   2311 		if ((sc->sc_type == WM_T_80003)
   2312 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2313 			sc->nvm.read = wm_nvm_read_eerd;
   2314 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2315 		} else {
   2316 			sc->nvm.read = wm_nvm_read_spi;
   2317 			sc->sc_flags |= WM_F_LOCK_EECD;
   2318 		}
   2319 		sc->phy.acquire = wm_get_phy_82575;
   2320 		sc->phy.release = wm_put_phy_82575;
   2321 		sc->nvm.acquire = wm_get_nvm_80003;
   2322 		sc->nvm.release = wm_put_nvm_80003;
   2323 		break;
   2324 	case WM_T_ICH8:
   2325 	case WM_T_ICH9:
   2326 	case WM_T_ICH10:
   2327 	case WM_T_PCH:
   2328 	case WM_T_PCH2:
   2329 	case WM_T_PCH_LPT:
   2330 		sc->nvm.read = wm_nvm_read_ich8;
   2331 		/* FLASH */
   2332 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2333 		sc->sc_nvm_wordsize = 2048;
   2334 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2335 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2336 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2337 			aprint_error_dev(sc->sc_dev,
   2338 			    "can't map FLASH registers\n");
   2339 			goto out;
   2340 		}
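		/*
		 * GFPREG holds the bounds of the gigabit flash region:
		 * the first sector number in the low field and the last
		 * in the field at bit 16, both in units of
		 * ICH_FLASH_SECTOR_SIZE.  The region size in bytes is
		 * converted to 16-bit words and halved below, since the
		 * region is split into two banks.
		 */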
   2341 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2342 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2343 		    ICH_FLASH_SECTOR_SIZE;
   2344 		sc->sc_ich8_flash_bank_size =
   2345 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2346 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2347 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2348 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2349 		sc->sc_flashreg_offset = 0;
   2350 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2351 		sc->phy.release = wm_put_swflag_ich8lan;
   2352 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2353 		sc->nvm.release = wm_put_nvm_ich8lan;
   2354 		break;
   2355 	case WM_T_PCH_SPT:
   2356 	case WM_T_PCH_CNP:
   2357 		sc->nvm.read = wm_nvm_read_spt;
   2358 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2359 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2360 		sc->sc_flasht = sc->sc_st;
   2361 		sc->sc_flashh = sc->sc_sh;
   2362 		sc->sc_ich8_flash_base = 0;
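		/*
		 * The NVM size comes from a strap: bits 5:1 of WMREG_STRAP
		 * hold (size / NVM_SIZE_MULTIPLIER) - 1, so add one and
		 * scale to get the size in bytes.
		 */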
   2363 		sc->sc_nvm_wordsize =
   2364 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2365 		    * NVM_SIZE_MULTIPLIER;
		/* It is the size in bytes; we want it in words */
   2367 		sc->sc_nvm_wordsize /= 2;
   2368 		/* Assume 2 banks */
   2369 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2370 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2371 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2372 		sc->phy.release = wm_put_swflag_ich8lan;
   2373 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2374 		sc->nvm.release = wm_put_nvm_ich8lan;
   2375 		break;
   2376 	case WM_T_I210:
   2377 	case WM_T_I211:
		/* Allow a single clear of the SW semaphore on I210 and newer */
   2379 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2380 		if (wm_nvm_flash_presence_i210(sc)) {
   2381 			sc->nvm.read = wm_nvm_read_eerd;
   2382 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2383 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2384 			wm_nvm_set_addrbits_size_eecd(sc);
   2385 		} else {
   2386 			sc->nvm.read = wm_nvm_read_invm;
   2387 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2388 			sc->sc_nvm_wordsize = INVM_SIZE;
   2389 		}
   2390 		sc->phy.acquire = wm_get_phy_82575;
   2391 		sc->phy.release = wm_put_phy_82575;
   2392 		sc->nvm.acquire = wm_get_nvm_80003;
   2393 		sc->nvm.release = wm_put_nvm_80003;
   2394 		break;
   2395 	default:
   2396 		break;
   2397 	}
   2398 
   2399 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2400 	switch (sc->sc_type) {
   2401 	case WM_T_82571:
   2402 	case WM_T_82572:
   2403 		reg = CSR_READ(sc, WMREG_SWSM2);
   2404 		if ((reg & SWSM2_LOCK) == 0) {
   2405 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2406 			force_clear_smbi = true;
   2407 		} else
   2408 			force_clear_smbi = false;
   2409 		break;
   2410 	case WM_T_82573:
   2411 	case WM_T_82574:
   2412 	case WM_T_82583:
   2413 		force_clear_smbi = true;
   2414 		break;
   2415 	default:
   2416 		force_clear_smbi = false;
   2417 		break;
   2418 	}
   2419 	if (force_clear_smbi) {
   2420 		reg = CSR_READ(sc, WMREG_SWSM);
   2421 		if ((reg & SWSM_SMBI) != 0)
   2422 			aprint_error_dev(sc->sc_dev,
   2423 			    "Please update the Bootagent\n");
   2424 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2425 	}
   2426 
   2427 	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
   2429 	 * This allows the EEPROM type to be printed correctly in the case
   2430 	 * that no EEPROM is attached.
   2431 	 */
   2432 	/*
   2433 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2434 	 * this for later, so we can fail future reads from the EEPROM.
   2435 	 */
   2436 	if (wm_nvm_validate_checksum(sc)) {
   2437 		/*
		 * Try again, because some PCI-e parts fail the first
		 * check due to the link being in a sleep state.
   2440 		 */
   2441 		if (wm_nvm_validate_checksum(sc))
   2442 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2443 	}
   2444 
   2445 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2446 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2447 	else {
   2448 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2449 		    sc->sc_nvm_wordsize);
   2450 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2451 			aprint_verbose("iNVM");
   2452 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2453 			aprint_verbose("FLASH(HW)");
   2454 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2455 			aprint_verbose("FLASH");
   2456 		else {
   2457 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2458 				eetype = "SPI";
   2459 			else
   2460 				eetype = "MicroWire";
   2461 			aprint_verbose("(%d address bits) %s EEPROM",
   2462 			    sc->sc_nvm_addrbits, eetype);
   2463 		}
   2464 	}
   2465 	wm_nvm_version(sc);
   2466 	aprint_verbose("\n");
   2467 
   2468 	/*
   2469 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2470 	 * incorrect.
   2471 	 */
   2472 	wm_gmii_setup_phytype(sc, 0, 0);
   2473 
   2474 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2475 	switch (sc->sc_type) {
   2476 	case WM_T_ICH8:
   2477 	case WM_T_ICH9:
   2478 	case WM_T_ICH10:
   2479 	case WM_T_PCH:
   2480 	case WM_T_PCH2:
   2481 	case WM_T_PCH_LPT:
   2482 	case WM_T_PCH_SPT:
   2483 	case WM_T_PCH_CNP:
   2484 		apme_mask = WUC_APME;
   2485 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2486 		if ((eeprom_data & apme_mask) != 0)
   2487 			sc->sc_flags |= WM_F_WOL;
   2488 		break;
   2489 	default:
   2490 		break;
   2491 	}
   2492 
   2493 	/* Reset the chip to a known state. */
   2494 	wm_reset(sc);
   2495 
   2496 	/*
   2497 	 * Check for I21[01] PLL workaround.
   2498 	 *
   2499 	 * Three cases:
   2500 	 * a) Chip is I211.
   2501 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2502 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2503 	 */
   2504 	if (sc->sc_type == WM_T_I211)
   2505 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2506 	if (sc->sc_type == WM_T_I210) {
   2507 		if (!wm_nvm_flash_presence_i210(sc))
   2508 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2509 		else if ((sc->sc_nvm_ver_major < 3)
   2510 		    || ((sc->sc_nvm_ver_major == 3)
   2511 			&& (sc->sc_nvm_ver_minor < 25))) {
   2512 			aprint_verbose_dev(sc->sc_dev,
   2513 			    "ROM image version %d.%d is older than 3.25\n",
   2514 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2515 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2516 		}
   2517 	}
   2518 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2519 		wm_pll_workaround_i210(sc);
   2520 
   2521 	wm_get_wakeup(sc);
   2522 
   2523 	/* Non-AMT based hardware can now take control from firmware */
   2524 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2525 		wm_get_hw_control(sc);
   2526 
   2527 	/*
   2528 	 * Read the Ethernet address from the EEPROM, if not first found
   2529 	 * in device properties.
   2530 	 */
   2531 	ea = prop_dictionary_get(dict, "mac-address");
   2532 	if (ea != NULL) {
   2533 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2534 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2535 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2536 	} else {
   2537 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2538 			aprint_error_dev(sc->sc_dev,
   2539 			    "unable to read Ethernet address\n");
   2540 			goto out;
   2541 		}
   2542 	}
   2543 
   2544 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2545 	    ether_sprintf(enaddr));
   2546 
   2547 	/*
   2548 	 * Read the config info from the EEPROM, and set up various
   2549 	 * bits in the control registers based on their contents.
   2550 	 */
   2551 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2552 	if (pn != NULL) {
   2553 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2554 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2555 	} else {
   2556 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2557 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2558 			goto out;
   2559 		}
   2560 	}
   2561 
   2562 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2563 	if (pn != NULL) {
   2564 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2565 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2566 	} else {
   2567 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2568 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2569 			goto out;
   2570 		}
   2571 	}
   2572 
   2573 	/* check for WM_F_WOL */
   2574 	switch (sc->sc_type) {
   2575 	case WM_T_82542_2_0:
   2576 	case WM_T_82542_2_1:
   2577 	case WM_T_82543:
   2578 		/* dummy? */
   2579 		eeprom_data = 0;
   2580 		apme_mask = NVM_CFG3_APME;
   2581 		break;
   2582 	case WM_T_82544:
   2583 		apme_mask = NVM_CFG2_82544_APM_EN;
   2584 		eeprom_data = cfg2;
   2585 		break;
   2586 	case WM_T_82546:
   2587 	case WM_T_82546_3:
   2588 	case WM_T_82571:
   2589 	case WM_T_82572:
   2590 	case WM_T_82573:
   2591 	case WM_T_82574:
   2592 	case WM_T_82583:
   2593 	case WM_T_80003:
   2594 	case WM_T_82575:
   2595 	case WM_T_82576:
   2596 		apme_mask = NVM_CFG3_APME;
   2597 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2598 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2599 		break;
   2600 	case WM_T_82580:
   2601 	case WM_T_I350:
   2602 	case WM_T_I354:
   2603 	case WM_T_I210:
   2604 	case WM_T_I211:
   2605 		apme_mask = NVM_CFG3_APME;
   2606 		wm_nvm_read(sc,
   2607 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2608 		    1, &eeprom_data);
   2609 		break;
   2610 	case WM_T_ICH8:
   2611 	case WM_T_ICH9:
   2612 	case WM_T_ICH10:
   2613 	case WM_T_PCH:
   2614 	case WM_T_PCH2:
   2615 	case WM_T_PCH_LPT:
   2616 	case WM_T_PCH_SPT:
   2617 	case WM_T_PCH_CNP:
		/* Already checked before wm_reset() */
   2619 		apme_mask = eeprom_data = 0;
   2620 		break;
   2621 	default: /* XXX 82540 */
   2622 		apme_mask = NVM_CFG3_APME;
   2623 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2624 		break;
   2625 	}
   2626 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2627 	if ((eeprom_data & apme_mask) != 0)
   2628 		sc->sc_flags |= WM_F_WOL;
   2629 
   2630 	/*
	 * We have the EEPROM settings; now apply the special cases
	 * where the EEPROM may be wrong or the board won't support
	 * wake on LAN on a particular port.
   2634 	 */
   2635 	switch (sc->sc_pcidevid) {
   2636 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2637 		sc->sc_flags &= ~WM_F_WOL;
   2638 		break;
   2639 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2640 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2641 		/* Wake events only supported on port A for dual fiber
   2642 		 * regardless of eeprom setting */
   2643 		if (sc->sc_funcid == 1)
   2644 			sc->sc_flags &= ~WM_F_WOL;
   2645 		break;
   2646 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2647 		/* If quad port adapter, disable WoL on all but port A */
   2648 		if (sc->sc_funcid != 0)
   2649 			sc->sc_flags &= ~WM_F_WOL;
   2650 		break;
   2651 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2652 		/* Wake events only supported on port A for dual fiber
   2653 		 * regardless of eeprom setting */
   2654 		if (sc->sc_funcid == 1)
   2655 			sc->sc_flags &= ~WM_F_WOL;
   2656 		break;
   2657 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2658 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2659 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2660 		/* If quad port adapter, disable WoL on all but port A */
   2661 		if (sc->sc_funcid != 0)
   2662 			sc->sc_flags &= ~WM_F_WOL;
   2663 		break;
   2664 	}
   2665 
   2666 	if (sc->sc_type >= WM_T_82575) {
   2667 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2668 			aprint_debug_dev(sc->sc_dev, "COMPAT = %hx\n",
   2669 			    nvmword);
   2670 			if ((sc->sc_type == WM_T_82575) ||
   2671 			    (sc->sc_type == WM_T_82576)) {
   2672 				/* Check NVM for autonegotiation */
   2673 				if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE)
   2674 				    != 0)
   2675 					sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2676 			}
   2677 			if ((sc->sc_type == WM_T_82575) ||
   2678 			    (sc->sc_type == WM_T_I350)) {
   2679 				if (nvmword & NVM_COMPAT_MAS_EN(sc->sc_funcid))
   2680 					sc->sc_flags |= WM_F_MAS;
   2681 			}
   2682 		}
   2683 	}
   2684 
   2685 	/*
	 * XXX Some multi-port cards need special handling to disable
	 * a particular port.
   2688 	 */
   2689 
   2690 	if (sc->sc_type >= WM_T_82544) {
   2691 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2692 		if (pn != NULL) {
   2693 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2694 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2695 		} else {
   2696 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2697 				aprint_error_dev(sc->sc_dev,
   2698 				    "unable to read SWDPIN\n");
   2699 				goto out;
   2700 			}
   2701 		}
   2702 	}
   2703 
   2704 	if (cfg1 & NVM_CFG1_ILOS)
   2705 		sc->sc_ctrl |= CTRL_ILOS;
   2706 
   2707 	/*
   2708 	 * XXX
	 * This code isn't correct because pins 2 and 3 are located
	 * in different positions on newer chips. Check all datasheets.
	 *
	 * Until this problem is resolved, only do this on chips up to
	 * the 82580.
   2713 	 */
   2714 	if (sc->sc_type <= WM_T_82580) {
   2715 		if (sc->sc_type >= WM_T_82544) {
   2716 			sc->sc_ctrl |=
   2717 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2718 			    CTRL_SWDPIO_SHIFT;
   2719 			sc->sc_ctrl |=
   2720 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2721 			    CTRL_SWDPINS_SHIFT;
   2722 		} else {
   2723 			sc->sc_ctrl |=
   2724 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2725 			    CTRL_SWDPIO_SHIFT;
   2726 		}
   2727 	}
   2728 
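	/*
	 * On 82580..I211, the per-port ILOS bit lives in the CFG3 word of
	 * the port's LAN function area in the NVM.
	 */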
   2729 	if ((sc->sc_type >= WM_T_82580) && (sc->sc_type <= WM_T_I211)) {
   2730 		wm_nvm_read(sc,
   2731 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2732 		    1, &nvmword);
   2733 		if (nvmword & NVM_CFG3_ILOS)
   2734 			sc->sc_ctrl |= CTRL_ILOS;
   2735 	}
   2736 
   2737 #if 0
   2738 	if (sc->sc_type >= WM_T_82544) {
   2739 		if (cfg1 & NVM_CFG1_IPS0)
   2740 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2741 		if (cfg1 & NVM_CFG1_IPS1)
   2742 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2743 		sc->sc_ctrl_ext |=
   2744 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2745 		    CTRL_EXT_SWDPIO_SHIFT;
   2746 		sc->sc_ctrl_ext |=
   2747 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2748 		    CTRL_EXT_SWDPINS_SHIFT;
   2749 	} else {
   2750 		sc->sc_ctrl_ext |=
   2751 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2752 		    CTRL_EXT_SWDPIO_SHIFT;
   2753 	}
   2754 #endif
   2755 
   2756 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2757 #if 0
   2758 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2759 #endif
   2760 
   2761 	if (sc->sc_type == WM_T_PCH) {
   2762 		uint16_t val;
   2763 
   2764 		/* Save the NVM K1 bit setting */
   2765 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2766 
   2767 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2768 			sc->sc_nvm_k1_enabled = 1;
   2769 		else
   2770 			sc->sc_nvm_k1_enabled = 0;
   2771 	}
   2772 
2773 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2774 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2775 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2776 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2777 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2778 	    || sc->sc_type == WM_T_82573
   2779 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2780 		/* Copper only */
   2781 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2782 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2783 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2784 	    || (sc->sc_type == WM_T_I211)) {
   2785 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2786 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2787 		switch (link_mode) {
   2788 		case CTRL_EXT_LINK_MODE_1000KX:
   2789 			aprint_normal_dev(sc->sc_dev, "1000KX\n");
   2790 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2791 			break;
   2792 		case CTRL_EXT_LINK_MODE_SGMII:
   2793 			if (wm_sgmii_uses_mdio(sc)) {
   2794 				aprint_normal_dev(sc->sc_dev,
   2795 				    "SGMII(MDIO)\n");
   2796 				sc->sc_flags |= WM_F_SGMII;
   2797 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2798 				break;
   2799 			}
   2800 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2801 			/*FALLTHROUGH*/
   2802 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2803 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2804 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2805 				if (link_mode
   2806 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2807 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2808 					sc->sc_flags |= WM_F_SGMII;
   2809 					aprint_verbose_dev(sc->sc_dev,
   2810 					    "SGMII\n");
   2811 				} else {
   2812 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2813 					aprint_verbose_dev(sc->sc_dev,
   2814 					    "SERDES\n");
   2815 				}
   2816 				break;
   2817 			}
   2818 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2819 				aprint_normal_dev(sc->sc_dev, "SERDES(SFP)\n");
   2820 			else if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2821 				aprint_normal_dev(sc->sc_dev, "SGMII(SFP)\n");
   2822 				sc->sc_flags |= WM_F_SGMII;
   2823 			}
   2824 			/* Do not change link mode for 100BaseFX */
   2825 			if (sc->sc_sfptype == SFF_SFP_ETH_FLAGS_100FX)
   2826 				break;
   2827 
   2828 			/* Change current link mode setting */
   2829 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2830 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2831 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2832 			else
   2833 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2834 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2835 			break;
   2836 		case CTRL_EXT_LINK_MODE_GMII:
   2837 		default:
   2838 			aprint_normal_dev(sc->sc_dev, "Copper\n");
   2839 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2840 			break;
   2841 		}
   2842 
2843 		/* Enable the I2C interface only while SGMII is in use */
2844 		reg &= ~CTRL_EXT_I2C_ENA;
2845 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2846 			reg |= CTRL_EXT_I2C_ENA;
   2848 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2849 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
   2850 			wm_gmii_setup_phytype(sc, 0, 0);
   2851 			wm_reset_mdicnfg_82580(sc);
   2852 		}
   2853 	} else if (sc->sc_type < WM_T_82543 ||
   2854 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2855 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2856 			aprint_error_dev(sc->sc_dev,
   2857 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2858 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2859 		}
   2860 	} else {
   2861 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2862 			aprint_error_dev(sc->sc_dev,
   2863 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2864 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2865 		}
   2866 	}
   2867 
   2868 	if (sc->sc_type >= WM_T_PCH2)
   2869 		sc->sc_flags |= WM_F_EEE;
   2870 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2871 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2872 		/* XXX: Need special handling for I354. (not yet) */
   2873 		if (sc->sc_type != WM_T_I354)
   2874 			sc->sc_flags |= WM_F_EEE;
   2875 	}
   2876 
   2877 	/* Set device properties (macflags) */
   2878 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2879 
   2880 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2881 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2882 
   2883 	/* Initialize the media structures accordingly. */
   2884 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2885 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2886 	else
   2887 		wm_tbi_mediainit(sc); /* All others */
   2888 
   2889 	ifp = &sc->sc_ethercom.ec_if;
   2890 	xname = device_xname(sc->sc_dev);
   2891 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2892 	ifp->if_softc = sc;
   2893 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2894 #ifdef WM_MPSAFE
   2895 	ifp->if_extflags = IFEF_MPSAFE;
   2896 #endif
   2897 	ifp->if_ioctl = wm_ioctl;
   2898 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2899 		ifp->if_start = wm_nq_start;
   2900 		/*
   2901 		 * When the number of CPUs is one and the controller can use
2902 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
2903 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
2904 		 * the other for link status changes.
   2905 		 * In this situation, wm_nq_transmit() is disadvantageous
   2906 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2907 		 */
   2908 		if (wm_is_using_multiqueue(sc))
   2909 			ifp->if_transmit = wm_nq_transmit;
   2910 	} else {
   2911 		ifp->if_start = wm_start;
   2912 		/*
2913 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2914 		 */
   2915 		if (wm_is_using_multiqueue(sc))
   2916 			ifp->if_transmit = wm_transmit;
   2917 	}
2918 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
   2919 	ifp->if_init = wm_init;
   2920 	ifp->if_stop = wm_stop;
   2921 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2922 	IFQ_SET_READY(&ifp->if_snd);
   2923 
   2924 	/* Check for jumbo frame */
   2925 	switch (sc->sc_type) {
   2926 	case WM_T_82573:
   2927 		/* XXX limited to 9234 if ASPM is disabled */
   2928 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2929 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2930 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2931 		break;
   2932 	case WM_T_82571:
   2933 	case WM_T_82572:
   2934 	case WM_T_82574:
   2935 	case WM_T_82583:
   2936 	case WM_T_82575:
   2937 	case WM_T_82576:
   2938 	case WM_T_82580:
   2939 	case WM_T_I350:
   2940 	case WM_T_I354:
   2941 	case WM_T_I210:
   2942 	case WM_T_I211:
   2943 	case WM_T_80003:
   2944 	case WM_T_ICH9:
   2945 	case WM_T_ICH10:
   2946 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2947 	case WM_T_PCH_LPT:
   2948 	case WM_T_PCH_SPT:
   2949 	case WM_T_PCH_CNP:
   2950 		/* XXX limited to 9234 */
   2951 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2952 		break;
   2953 	case WM_T_PCH:
   2954 		/* XXX limited to 4096 */
   2955 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2956 		break;
   2957 	case WM_T_82542_2_0:
   2958 	case WM_T_82542_2_1:
   2959 	case WM_T_ICH8:
   2960 		/* No support for jumbo frame */
   2961 		break;
   2962 	default:
   2963 		/* ETHER_MAX_LEN_JUMBO */
   2964 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2965 		break;
   2966 	}
   2967 
2968 	/* If we're an i82543 or greater, we can support VLANs. */
   2969 	if (sc->sc_type >= WM_T_82543) {
   2970 		sc->sc_ethercom.ec_capabilities |=
   2971 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2972 		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
   2973 	}
   2974 
   2975 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2976 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2977 
   2978 	/*
2979 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2980 	 * on i82543 and later.
   2981 	 */
   2982 	if (sc->sc_type >= WM_T_82543) {
   2983 		ifp->if_capabilities |=
   2984 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2985 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2986 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2987 		    IFCAP_CSUM_TCPv6_Tx |
   2988 		    IFCAP_CSUM_UDPv6_Tx;
   2989 	}
   2990 
   2991 	/*
2992 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2993 	 *
   2994 	 *	82541GI (8086:1076) ... no
   2995 	 *	82572EI (8086:10b9) ... yes
   2996 	 */
   2997 	if (sc->sc_type >= WM_T_82571) {
   2998 		ifp->if_capabilities |=
   2999 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   3000 	}
   3001 
   3002 	/*
3003 	 * If we're an i82544 or greater (except i82547), we can do
   3004 	 * TCP segmentation offload.
   3005 	 */
   3006 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   3007 		ifp->if_capabilities |= IFCAP_TSOv4;
   3008 	}
   3009 
   3010 	if (sc->sc_type >= WM_T_82571) {
   3011 		ifp->if_capabilities |= IFCAP_TSOv6;
   3012 	}
   3013 
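	/*
	 * Default limits on how many Tx/Rx descriptors are processed per
	 * softint call and per hardware interrupt, respectively.
	 */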
   3014 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   3015 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   3016 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   3017 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   3018 
   3019 #ifdef WM_MPSAFE
   3020 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   3021 #else
   3022 	sc->sc_core_lock = NULL;
   3023 #endif
   3024 
   3025 	/* Attach the interface. */
   3026 	error = if_initialize(ifp);
   3027 	if (error != 0) {
   3028 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   3029 		    error);
   3030 		return; /* Error */
   3031 	}
   3032 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   3033 	ether_ifattach(ifp, enaddr);
   3034 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   3035 	if_register(ifp);
   3036 
   3037 #ifdef WM_EVENT_COUNTERS
   3038 	/* Attach event counters. */
   3039 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   3040 	    NULL, xname, "linkintr");
   3041 
   3042 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   3043 	    NULL, xname, "tx_xoff");
   3044 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   3045 	    NULL, xname, "tx_xon");
   3046 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   3047 	    NULL, xname, "rx_xoff");
   3048 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   3049 	    NULL, xname, "rx_xon");
   3050 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   3051 	    NULL, xname, "rx_macctl");
   3052 #endif /* WM_EVENT_COUNTERS */
   3053 
   3054 	sc->sc_txrx_use_workqueue = false;
   3055 
   3056 	wm_init_sysctls(sc);
   3057 
   3058 	if (pmf_device_register(self, wm_suspend, wm_resume))
   3059 		pmf_class_network_register(self, ifp);
   3060 	else
   3061 		aprint_error_dev(self, "couldn't establish power handler\n");
   3062 
   3063 	sc->sc_flags |= WM_F_ATTACHED;
   3064 out:
   3065 	return;
   3066 }
   3067 
   3068 /* The detach function (ca_detach) */
   3069 static int
   3070 wm_detach(device_t self, int flags __unused)
   3071 {
   3072 	struct wm_softc *sc = device_private(self);
   3073 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3074 	int i;
   3075 
   3076 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   3077 		return 0;
   3078 
   3079 	/* Stop the interface. Callouts are stopped in it. */
   3080 	wm_stop(ifp, 1);
   3081 
   3082 	pmf_device_deregister(self);
   3083 
   3084 	sysctl_teardown(&sc->sc_sysctllog);
   3085 
   3086 #ifdef WM_EVENT_COUNTERS
   3087 	evcnt_detach(&sc->sc_ev_linkintr);
   3088 
   3089 	evcnt_detach(&sc->sc_ev_tx_xoff);
   3090 	evcnt_detach(&sc->sc_ev_tx_xon);
   3091 	evcnt_detach(&sc->sc_ev_rx_xoff);
   3092 	evcnt_detach(&sc->sc_ev_rx_xon);
   3093 	evcnt_detach(&sc->sc_ev_rx_macctl);
   3094 #endif /* WM_EVENT_COUNTERS */
   3095 
   3096 	/* Tell the firmware about the release */
   3097 	WM_CORE_LOCK(sc);
   3098 	wm_release_manageability(sc);
   3099 	wm_release_hw_control(sc);
   3100 	wm_enable_wakeup(sc);
   3101 	WM_CORE_UNLOCK(sc);
   3102 
   3103 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3104 
   3105 	/* Delete all remaining media. */
   3106 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3107 
   3108 	ether_ifdetach(ifp);
   3109 	if_detach(ifp);
   3110 	if_percpuq_destroy(sc->sc_ipq);
   3111 
   3112 	/* Unload RX dmamaps and free mbufs */
   3113 	for (i = 0; i < sc->sc_nqueues; i++) {
   3114 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3115 		mutex_enter(rxq->rxq_lock);
   3116 		wm_rxdrain(rxq);
   3117 		mutex_exit(rxq->rxq_lock);
   3118 	}
   3119 	/* Must unlock here */
   3120 
   3121 	/* Disestablish the interrupt handler */
   3122 	for (i = 0; i < sc->sc_nintrs; i++) {
   3123 		if (sc->sc_ihs[i] != NULL) {
   3124 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3125 			sc->sc_ihs[i] = NULL;
   3126 		}
   3127 	}
   3128 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3129 
3130 	/* wm_stop() ensures the workqueue is stopped. */
   3131 	workqueue_destroy(sc->sc_queue_wq);
   3132 
   3133 	for (i = 0; i < sc->sc_nqueues; i++)
   3134 		softint_disestablish(sc->sc_queue[i].wmq_si);
   3135 
   3136 	wm_free_txrx_queues(sc);
   3137 
   3138 	/* Unmap the registers */
   3139 	if (sc->sc_ss) {
   3140 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3141 		sc->sc_ss = 0;
   3142 	}
   3143 	if (sc->sc_ios) {
   3144 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3145 		sc->sc_ios = 0;
   3146 	}
   3147 	if (sc->sc_flashs) {
   3148 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3149 		sc->sc_flashs = 0;
   3150 	}
   3151 
   3152 	if (sc->sc_core_lock)
   3153 		mutex_obj_free(sc->sc_core_lock);
   3154 	if (sc->sc_ich_phymtx)
   3155 		mutex_obj_free(sc->sc_ich_phymtx);
   3156 	if (sc->sc_ich_nvmmtx)
   3157 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3158 
   3159 	return 0;
   3160 }
   3161 
   3162 static bool
   3163 wm_suspend(device_t self, const pmf_qual_t *qual)
   3164 {
   3165 	struct wm_softc *sc = device_private(self);
   3166 
   3167 	wm_release_manageability(sc);
   3168 	wm_release_hw_control(sc);
   3169 	wm_enable_wakeup(sc);
   3170 
   3171 	return true;
   3172 }
   3173 
   3174 static bool
   3175 wm_resume(device_t self, const pmf_qual_t *qual)
   3176 {
   3177 	struct wm_softc *sc = device_private(self);
   3178 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3179 	pcireg_t reg;
   3180 	char buf[256];
   3181 
   3182 	reg = CSR_READ(sc, WMREG_WUS);
   3183 	if (reg != 0) {
   3184 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3185 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3186 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3187 	}
   3188 
   3189 	if (sc->sc_type >= WM_T_PCH2)
   3190 		wm_resume_workarounds_pchlan(sc);
   3191 	if ((ifp->if_flags & IFF_UP) == 0) {
   3192 		wm_reset(sc);
   3193 		/* Non-AMT based hardware can now take control from firmware */
   3194 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3195 			wm_get_hw_control(sc);
   3196 		wm_init_manageability(sc);
   3197 	} else {
   3198 		/*
   3199 		 * We called pmf_class_network_register(), so if_init() is
   3200 		 * automatically called when IFF_UP. wm_reset(),
   3201 		 * wm_get_hw_control() and wm_init_manageability() are called
   3202 		 * via wm_init().
   3203 		 */
   3204 	}
   3205 
   3206 	return true;
   3207 }
   3208 
   3209 /*
   3210  * wm_watchdog:		[ifnet interface function]
   3211  *
   3212  *	Watchdog timer handler.
   3213  */
   3214 static void
   3215 wm_watchdog(struct ifnet *ifp)
   3216 {
   3217 	int qid;
   3218 	struct wm_softc *sc = ifp->if_softc;
3219 	uint16_t hang_queue = 0; /* Max number of queues is the 82576's 16 */
   3220 
   3221 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3222 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3223 
   3224 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3225 	}
   3226 
3227 	/* If any of the queues hung up, reset the interface. */
   3228 	if (hang_queue != 0) {
   3229 		(void)wm_init(ifp);
   3230 
   3231 		/*
3232 		 * There is still some upper-layer processing which calls
3233 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3234 		 */
   3235 		/* Try to get more packets going. */
   3236 		ifp->if_start(ifp);
   3237 	}
   3238 }
   3239 
   3240 
   3241 static void
   3242 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3243 {
   3244 
   3245 	mutex_enter(txq->txq_lock);
   3246 	if (txq->txq_sending &&
   3247 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
   3248 		wm_watchdog_txq_locked(ifp, txq, hang);
   3249 
   3250 	mutex_exit(txq->txq_lock);
   3251 }
   3252 
   3253 static void
   3254 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3255     uint16_t *hang)
   3256 {
   3257 	struct wm_softc *sc = ifp->if_softc;
   3258 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3259 
   3260 	KASSERT(mutex_owned(txq->txq_lock));
   3261 
   3262 	/*
   3263 	 * Since we're using delayed interrupts, sweep up
   3264 	 * before we report an error.
   3265 	 */
   3266 	wm_txeof(txq, UINT_MAX);
   3267 
   3268 	if (txq->txq_sending)
   3269 		*hang |= __BIT(wmq->wmq_id);
   3270 
   3271 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3272 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3273 		    device_xname(sc->sc_dev));
   3274 	} else {
   3275 #ifdef WM_DEBUG
   3276 		int i, j;
   3277 		struct wm_txsoft *txs;
   3278 #endif
   3279 		log(LOG_ERR,
   3280 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3281 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3282 		    txq->txq_next);
   3283 		if_statinc(ifp, if_oerrors);
   3284 #ifdef WM_DEBUG
   3285 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3286 		    i = WM_NEXTTXS(txq, i)) {
   3287 			txs = &txq->txq_soft[i];
   3288 			printf("txs %d tx %d -> %d\n",
   3289 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   3290 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3291 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3292 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3293 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3294 					printf("\t %#08x%08x\n",
   3295 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3296 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3297 				} else {
   3298 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3299 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3300 					    txq->txq_descs[j].wtx_addr.wa_low);
   3301 					printf("\t %#04x%02x%02x%08x\n",
   3302 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3303 					    txq->txq_descs[j].wtx_fields.wtxu_options,
   3304 					    txq->txq_descs[j].wtx_fields.wtxu_status,
   3305 					    txq->txq_descs[j].wtx_cmdlen);
   3306 				}
   3307 				if (j == txs->txs_lastdesc)
   3308 					break;
   3309 			}
   3310 		}
   3311 #endif
   3312 	}
   3313 }
   3314 
   3315 /*
   3316  * wm_tick:
   3317  *
   3318  *	One second timer, used to check link status, sweep up
   3319  *	completed transmit jobs, etc.
   3320  */
   3321 static void
   3322 wm_tick(void *arg)
   3323 {
   3324 	struct wm_softc *sc = arg;
   3325 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3326 #ifndef WM_MPSAFE
   3327 	int s = splnet();
   3328 #endif
   3329 
   3330 	WM_CORE_LOCK(sc);
   3331 
   3332 	if (sc->sc_core_stopping) {
   3333 		WM_CORE_UNLOCK(sc);
   3334 #ifndef WM_MPSAFE
   3335 		splx(s);
   3336 #endif
   3337 		return;
   3338 	}
   3339 
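	/* Accumulate the flow control counters; they are clear-on-read */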
   3340 	if (sc->sc_type >= WM_T_82542_2_1) {
   3341 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3342 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3343 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3344 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3345 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3346 	}
   3347 
   3348 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   3349 	if_statadd_ref(nsr, if_collisions, CSR_READ(sc, WMREG_COLC));
   3350 	if_statadd_ref(nsr, if_ierrors, 0ULL /* ensure quad_t */
   3351 	    + CSR_READ(sc, WMREG_CRCERRS)
   3352 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3353 	    + CSR_READ(sc, WMREG_SYMERRC)
   3354 	    + CSR_READ(sc, WMREG_RXERRC)
   3355 	    + CSR_READ(sc, WMREG_SEC)
   3356 	    + CSR_READ(sc, WMREG_CEXTERR)
   3357 	    + CSR_READ(sc, WMREG_RLEC));
   3358 	/*
3359 	 * WMREG_RNBC is incremented when there are no available buffers in
3360 	 * host memory. It does not count dropped packets, because the
3361 	 * ethernet controller can still receive packets in that case as
3362 	 * long as there is space in the PHY's FIFO.
3363 	 *
3364 	 * To track WMREG_RNBC itself, use a dedicated EVCNT instead of
3365 	 * if_iqdrops.
   3366 	 */
   3367 	if_statadd_ref(nsr, if_iqdrops, CSR_READ(sc, WMREG_MPC));
   3368 	IF_STAT_PUTREF(ifp);
   3369 
   3370 	if (sc->sc_flags & WM_F_HAS_MII)
   3371 		mii_tick(&sc->sc_mii);
   3372 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3373 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3374 		wm_serdes_tick(sc);
   3375 	else
   3376 		wm_tbi_tick(sc);
   3377 
   3378 	WM_CORE_UNLOCK(sc);
   3379 
   3380 	wm_watchdog(ifp);
   3381 
   3382 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3383 }
   3384 
   3385 static int
   3386 wm_ifflags_cb(struct ethercom *ec)
   3387 {
   3388 	struct ifnet *ifp = &ec->ec_if;
   3389 	struct wm_softc *sc = ifp->if_softc;
   3390 	u_short iffchange;
   3391 	int ecchange;
   3392 	bool needreset = false;
   3393 	int rc = 0;
   3394 
   3395 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3396 		device_xname(sc->sc_dev), __func__));
   3397 
   3398 	WM_CORE_LOCK(sc);
   3399 
   3400 	/*
   3401 	 * Check for if_flags.
   3402 	 * Main usage is to prevent linkdown when opening bpf.
   3403 	 */
   3404 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3405 	sc->sc_if_flags = ifp->if_flags;
   3406 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3407 		needreset = true;
   3408 		goto ec;
   3409 	}
   3410 
   3411 	/* iff related updates */
   3412 	if ((iffchange & IFF_PROMISC) != 0)
   3413 		wm_set_filter(sc);
   3414 
   3415 	wm_set_vlan(sc);
   3416 
   3417 ec:
   3418 	/* Check for ec_capenable. */
   3419 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3420 	sc->sc_ec_capenable = ec->ec_capenable;
   3421 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3422 		needreset = true;
   3423 		goto out;
   3424 	}
   3425 
   3426 	/* ec related updates */
   3427 	wm_set_eee(sc);
   3428 
   3429 out:
   3430 	if (needreset)
   3431 		rc = ENETRESET;
   3432 	WM_CORE_UNLOCK(sc);
   3433 
   3434 	return rc;
   3435 }
   3436 
   3437 /*
   3438  * wm_ioctl:		[ifnet interface function]
   3439  *
   3440  *	Handle control requests from the operator.
   3441  */
   3442 static int
   3443 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3444 {
   3445 	struct wm_softc *sc = ifp->if_softc;
   3446 	struct ifreq *ifr = (struct ifreq *)data;
   3447 	struct ifaddr *ifa = (struct ifaddr *)data;
   3448 	struct sockaddr_dl *sdl;
   3449 	int s, error;
   3450 
   3451 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3452 		device_xname(sc->sc_dev), __func__));
   3453 
   3454 #ifndef WM_MPSAFE
   3455 	s = splnet();
   3456 #endif
   3457 	switch (cmd) {
   3458 	case SIOCSIFMEDIA:
   3459 		WM_CORE_LOCK(sc);
   3460 		/* Flow control requires full-duplex mode. */
   3461 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3462 		    (ifr->ifr_media & IFM_FDX) == 0)
   3463 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3464 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3465 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3466 				/* We can do both TXPAUSE and RXPAUSE. */
   3467 				ifr->ifr_media |=
   3468 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3469 			}
   3470 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3471 		}
   3472 		WM_CORE_UNLOCK(sc);
   3473 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3474 		break;
   3475 	case SIOCINITIFADDR:
   3476 		WM_CORE_LOCK(sc);
   3477 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3478 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3479 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3480 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3481 			/* Unicast address is the first multicast entry */
   3482 			wm_set_filter(sc);
   3483 			error = 0;
   3484 			WM_CORE_UNLOCK(sc);
   3485 			break;
   3486 		}
   3487 		WM_CORE_UNLOCK(sc);
   3488 		/*FALLTHROUGH*/
   3489 	default:
   3490 #ifdef WM_MPSAFE
   3491 		s = splnet();
   3492 #endif
   3493 		/* It may call wm_start, so unlock here */
   3494 		error = ether_ioctl(ifp, cmd, data);
   3495 #ifdef WM_MPSAFE
   3496 		splx(s);
   3497 #endif
   3498 		if (error != ENETRESET)
   3499 			break;
   3500 
   3501 		error = 0;
   3502 
   3503 		if (cmd == SIOCSIFCAP)
   3504 			error = (*ifp->if_init)(ifp);
   3505 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3506 			;
   3507 		else if (ifp->if_flags & IFF_RUNNING) {
   3508 			/*
   3509 			 * Multicast list has changed; set the hardware filter
   3510 			 * accordingly.
   3511 			 */
   3512 			WM_CORE_LOCK(sc);
   3513 			wm_set_filter(sc);
   3514 			WM_CORE_UNLOCK(sc);
   3515 		}
   3516 		break;
   3517 	}
   3518 
   3519 #ifndef WM_MPSAFE
   3520 	splx(s);
   3521 #endif
   3522 	return error;
   3523 }
   3524 
   3525 /* MAC address related */
   3526 
   3527 /*
3528  * Get the offset of the MAC address and return it.
3529  * If an error occurs, use offset 0.
   3530  */
   3531 static uint16_t
   3532 wm_check_alt_mac_addr(struct wm_softc *sc)
   3533 {
   3534 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3535 	uint16_t offset = NVM_OFF_MACADDR;
   3536 
   3537 	/* Try to read alternative MAC address pointer */
   3538 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3539 		return 0;
   3540 
3541 	/* Check whether the pointer is valid. */
   3542 	if ((offset == 0x0000) || (offset == 0xffff))
   3543 		return 0;
   3544 
   3545 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3546 	/*
3547 	 * Check whether the alternative MAC address is valid. Some cards
3548 	 * have a non-0xffff pointer but don't actually use an alternative
3549 	 * MAC address.
3550 	 *
3551 	 * A valid unicast address must have the group (multicast) bit clear.
   3552 	 */
   3553 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3554 		if (((myea[0] & 0xff) & 0x01) == 0)
   3555 			return offset; /* Found */
   3556 
   3557 	/* Not found */
   3558 	return 0;
   3559 }
   3560 
   3561 static int
   3562 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3563 {
   3564 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3565 	uint16_t offset = NVM_OFF_MACADDR;
   3566 	int do_invert = 0;
   3567 
   3568 	switch (sc->sc_type) {
   3569 	case WM_T_82580:
   3570 	case WM_T_I350:
   3571 	case WM_T_I354:
   3572 		/* EEPROM Top Level Partitioning */
   3573 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3574 		break;
   3575 	case WM_T_82571:
   3576 	case WM_T_82575:
   3577 	case WM_T_82576:
   3578 	case WM_T_80003:
   3579 	case WM_T_I210:
   3580 	case WM_T_I211:
   3581 		offset = wm_check_alt_mac_addr(sc);
   3582 		if (offset == 0)
   3583 			if ((sc->sc_funcid & 0x01) == 1)
   3584 				do_invert = 1;
   3585 		break;
   3586 	default:
   3587 		if ((sc->sc_funcid & 0x01) == 1)
   3588 			do_invert = 1;
   3589 		break;
   3590 	}
   3591 
   3592 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3593 		goto bad;
   3594 
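	/* The NVM stores the MAC address as three little-endian 16-bit words */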
   3595 	enaddr[0] = myea[0] & 0xff;
   3596 	enaddr[1] = myea[0] >> 8;
   3597 	enaddr[2] = myea[1] & 0xff;
   3598 	enaddr[3] = myea[1] >> 8;
   3599 	enaddr[4] = myea[2] & 0xff;
   3600 	enaddr[5] = myea[2] >> 8;
   3601 
   3602 	/*
   3603 	 * Toggle the LSB of the MAC address on the second port
   3604 	 * of some dual port cards.
   3605 	 */
   3606 	if (do_invert != 0)
   3607 		enaddr[5] ^= 1;
   3608 
   3609 	return 0;
   3610 
   3611  bad:
   3612 	return -1;
   3613 }
   3614 
   3615 /*
   3616  * wm_set_ral:
   3617  *
3618  *	Set an entry in the receive address list.
   3619  */
   3620 static void
   3621 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3622 {
   3623 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3624 	uint32_t wlock_mac;
   3625 	int rv;
   3626 
   3627 	if (enaddr != NULL) {
   3628 		ral_lo = (uint32_t)enaddr[0] | ((uint32_t)enaddr[1] << 8) |
   3629 		    ((uint32_t)enaddr[2] << 16) | ((uint32_t)enaddr[3] << 24);
   3630 		ral_hi = (uint32_t)enaddr[4] | ((uint32_t)enaddr[5] << 8);
   3631 		ral_hi |= RAL_AV;
   3632 	} else {
   3633 		ral_lo = 0;
   3634 		ral_hi = 0;
   3635 	}
   3636 
   3637 	switch (sc->sc_type) {
   3638 	case WM_T_82542_2_0:
   3639 	case WM_T_82542_2_1:
   3640 	case WM_T_82543:
   3641 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3642 		CSR_WRITE_FLUSH(sc);
   3643 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3644 		CSR_WRITE_FLUSH(sc);
   3645 		break;
   3646 	case WM_T_PCH2:
   3647 	case WM_T_PCH_LPT:
   3648 	case WM_T_PCH_SPT:
   3649 	case WM_T_PCH_CNP:
   3650 		if (idx == 0) {
   3651 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3652 			CSR_WRITE_FLUSH(sc);
   3653 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3654 			CSR_WRITE_FLUSH(sc);
   3655 			return;
   3656 		}
   3657 		if (sc->sc_type != WM_T_PCH2) {
   3658 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3659 			    FWSM_WLOCK_MAC);
   3660 			addrl = WMREG_SHRAL(idx - 1);
   3661 			addrh = WMREG_SHRAH(idx - 1);
   3662 		} else {
   3663 			wlock_mac = 0;
   3664 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3665 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3666 		}
   3667 
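		/*
		 * FWSM_WLOCK_MAC limits how many shared receive address
		 * registers firmware allows software to modify; zero means
		 * no limit.
		 */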
   3668 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3669 			rv = wm_get_swflag_ich8lan(sc);
   3670 			if (rv != 0)
   3671 				return;
   3672 			CSR_WRITE(sc, addrl, ral_lo);
   3673 			CSR_WRITE_FLUSH(sc);
   3674 			CSR_WRITE(sc, addrh, ral_hi);
   3675 			CSR_WRITE_FLUSH(sc);
   3676 			wm_put_swflag_ich8lan(sc);
   3677 		}
   3678 
   3679 		break;
   3680 	default:
   3681 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3682 		CSR_WRITE_FLUSH(sc);
   3683 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3684 		CSR_WRITE_FLUSH(sc);
   3685 		break;
   3686 	}
   3687 }
   3688 
   3689 /*
   3690  * wm_mchash:
   3691  *
   3692  *	Compute the hash of the multicast address for the 4096-bit
   3693  *	multicast filter.
   3694  */
   3695 static uint32_t
   3696 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3697 {
   3698 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3699 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3700 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3701 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3702 	uint32_t hash;
   3703 
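	/*
	 * The hash is built from bits of the last two octets of the
	 * address; sc_mchash_type selects the bit offsets. ICH/PCH chips
	 * use a 1024-bit table, the others a 4096-bit table.
	 */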
   3704 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3705 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3706 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3707 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3708 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3709 		    (((uint16_t)enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3710 		return (hash & 0x3ff);
   3711 	}
   3712 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3713 	    (((uint16_t)enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3714 
   3715 	return (hash & 0xfff);
   3716 }
   3717 
3718 /*
3719  * wm_rar_count:
3720  *	Return the number of receive address register (RAR) entries.
3721  */
   3722 static int
   3723 wm_rar_count(struct wm_softc *sc)
   3724 {
   3725 	int size;
   3726 
   3727 	switch (sc->sc_type) {
   3728 	case WM_T_ICH8:
3729 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3730 		break;
   3731 	case WM_T_ICH9:
   3732 	case WM_T_ICH10:
   3733 	case WM_T_PCH:
   3734 		size = WM_RAL_TABSIZE_ICH8;
   3735 		break;
   3736 	case WM_T_PCH2:
   3737 		size = WM_RAL_TABSIZE_PCH2;
   3738 		break;
   3739 	case WM_T_PCH_LPT:
   3740 	case WM_T_PCH_SPT:
   3741 	case WM_T_PCH_CNP:
   3742 		size = WM_RAL_TABSIZE_PCH_LPT;
   3743 		break;
   3744 	case WM_T_82575:
   3745 	case WM_T_I210:
   3746 	case WM_T_I211:
   3747 		size = WM_RAL_TABSIZE_82575;
   3748 		break;
   3749 	case WM_T_82576:
   3750 	case WM_T_82580:
   3751 		size = WM_RAL_TABSIZE_82576;
   3752 		break;
   3753 	case WM_T_I350:
   3754 	case WM_T_I354:
   3755 		size = WM_RAL_TABSIZE_I350;
   3756 		break;
   3757 	default:
   3758 		size = WM_RAL_TABSIZE;
   3759 	}
   3760 
   3761 	return size;
   3762 }
   3763 
   3764 /*
   3765  * wm_set_filter:
   3766  *
   3767  *	Set up the receive filter.
   3768  */
   3769 static void
   3770 wm_set_filter(struct wm_softc *sc)
   3771 {
   3772 	struct ethercom *ec = &sc->sc_ethercom;
   3773 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3774 	struct ether_multi *enm;
   3775 	struct ether_multistep step;
   3776 	bus_addr_t mta_reg;
   3777 	uint32_t hash, reg, bit;
   3778 	int i, size, ralmax;
   3779 
   3780 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3781 		device_xname(sc->sc_dev), __func__));
   3782 
   3783 	if (sc->sc_type >= WM_T_82544)
   3784 		mta_reg = WMREG_CORDOVA_MTA;
   3785 	else
   3786 		mta_reg = WMREG_MTA;
   3787 
   3788 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3789 
   3790 	if (ifp->if_flags & IFF_BROADCAST)
   3791 		sc->sc_rctl |= RCTL_BAM;
   3792 	if (ifp->if_flags & IFF_PROMISC) {
   3793 		sc->sc_rctl |= RCTL_UPE;
   3794 		ETHER_LOCK(ec);
   3795 		ec->ec_flags |= ETHER_F_ALLMULTI;
   3796 		ETHER_UNLOCK(ec);
   3797 		goto allmulti;
   3798 	}
   3799 
   3800 	/*
   3801 	 * Set the station address in the first RAL slot, and
   3802 	 * clear the remaining slots.
   3803 	 */
   3804 	size = wm_rar_count(sc);
   3805 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3806 
   3807 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3808 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3809 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3810 		switch (i) {
   3811 		case 0:
   3812 			/* We can use all entries */
   3813 			ralmax = size;
   3814 			break;
   3815 		case 1:
   3816 			/* Only RAR[0] */
   3817 			ralmax = 1;
   3818 			break;
   3819 		default:
   3820 			/* Available SHRA + RAR[0] */
   3821 			ralmax = i + 1;
   3822 		}
   3823 	} else
   3824 		ralmax = size;
   3825 	for (i = 1; i < size; i++) {
   3826 		if (i < ralmax)
   3827 			wm_set_ral(sc, NULL, i);
   3828 	}
   3829 
   3830 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3831 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3832 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3833 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3834 		size = WM_ICH8_MC_TABSIZE;
   3835 	else
   3836 		size = WM_MC_TABSIZE;
   3837 	/* Clear out the multicast table. */
   3838 	for (i = 0; i < size; i++) {
   3839 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3840 		CSR_WRITE_FLUSH(sc);
   3841 	}
   3842 
   3843 	ETHER_LOCK(ec);
   3844 	ETHER_FIRST_MULTI(step, ec, enm);
   3845 	while (enm != NULL) {
   3846 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3847 			ec->ec_flags |= ETHER_F_ALLMULTI;
   3848 			ETHER_UNLOCK(ec);
   3849 			/*
   3850 			 * We must listen to a range of multicast addresses.
   3851 			 * For now, just accept all multicasts, rather than
   3852 			 * trying to set only those filter bits needed to match
   3853 			 * the range.  (At this time, the only use of address
   3854 			 * ranges is for IP multicast routing, for which the
   3855 			 * range is big enough to require all bits set.)
   3856 			 */
   3857 			goto allmulti;
   3858 		}
   3859 
   3860 		hash = wm_mchash(sc, enm->enm_addrlo);
   3861 
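		/*
		 * The upper bits of the hash select a 32-bit MTA word and
		 * the low 5 bits select a bit within that word.
		 */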
   3862 		reg = (hash >> 5);
   3863 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3864 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3865 		    || (sc->sc_type == WM_T_PCH2)
   3866 		    || (sc->sc_type == WM_T_PCH_LPT)
   3867 		    || (sc->sc_type == WM_T_PCH_SPT)
   3868 		    || (sc->sc_type == WM_T_PCH_CNP))
   3869 			reg &= 0x1f;
   3870 		else
   3871 			reg &= 0x7f;
   3872 		bit = hash & 0x1f;
   3873 
   3874 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3875 		hash |= 1U << bit;
   3876 
   3877 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3878 			/*
   3879 			 * 82544 Errata 9: Certain register cannot be written
   3880 			 * with particular alignments in PCI-X bus operation
   3881 			 * (FCAH, MTA and VFTA).
   3882 			 */
   3883 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3884 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3885 			CSR_WRITE_FLUSH(sc);
   3886 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3887 			CSR_WRITE_FLUSH(sc);
   3888 		} else {
   3889 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3890 			CSR_WRITE_FLUSH(sc);
   3891 		}
   3892 
   3893 		ETHER_NEXT_MULTI(step, enm);
   3894 	}
   3895 	ec->ec_flags &= ~ETHER_F_ALLMULTI;
   3896 	ETHER_UNLOCK(ec);
   3897 
   3898 	goto setit;
   3899 
   3900  allmulti:
   3901 	sc->sc_rctl |= RCTL_MPE;
   3902 
   3903  setit:
   3904 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3905 }
   3906 
   3907 /* Reset and init related */
   3908 
   3909 static void
   3910 wm_set_vlan(struct wm_softc *sc)
   3911 {
   3912 
   3913 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3914 		device_xname(sc->sc_dev), __func__));
   3915 
   3916 	/* Deal with VLAN enables. */
   3917 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3918 		sc->sc_ctrl |= CTRL_VME;
   3919 	else
   3920 		sc->sc_ctrl &= ~CTRL_VME;
   3921 
   3922 	/* Write the control registers. */
   3923 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3924 }
   3925 
   3926 static void
   3927 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3928 {
   3929 	uint32_t gcr;
   3930 	pcireg_t ctrl2;
   3931 
   3932 	gcr = CSR_READ(sc, WMREG_GCR);
   3933 
   3934 	/* Only take action if timeout value is defaulted to 0 */
   3935 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3936 		goto out;
   3937 
   3938 	if ((gcr & GCR_CAP_VER2) == 0) {
   3939 		gcr |= GCR_CMPL_TMOUT_10MS;
   3940 		goto out;
   3941 	}
   3942 
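	/*
	 * For a version 2 capability, set the timeout through the PCIe
	 * Device Control 2 register instead of GCR.
	 */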
   3943 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3944 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3945 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3946 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3947 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3948 
   3949 out:
   3950 	/* Disable completion timeout resend */
   3951 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3952 
   3953 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3954 }
   3955 
   3956 void
   3957 wm_get_auto_rd_done(struct wm_softc *sc)
   3958 {
   3959 	int i;
   3960 
3961 	/* Wait for eeprom to reload */
   3962 	switch (sc->sc_type) {
   3963 	case WM_T_82571:
   3964 	case WM_T_82572:
   3965 	case WM_T_82573:
   3966 	case WM_T_82574:
   3967 	case WM_T_82583:
   3968 	case WM_T_82575:
   3969 	case WM_T_82576:
   3970 	case WM_T_82580:
   3971 	case WM_T_I350:
   3972 	case WM_T_I354:
   3973 	case WM_T_I210:
   3974 	case WM_T_I211:
   3975 	case WM_T_80003:
   3976 	case WM_T_ICH8:
   3977 	case WM_T_ICH9:
   3978 		for (i = 0; i < 10; i++) {
   3979 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3980 				break;
   3981 			delay(1000);
   3982 		}
   3983 		if (i == 10) {
   3984 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3985 			    "complete\n", device_xname(sc->sc_dev));
   3986 		}
   3987 		break;
   3988 	default:
   3989 		break;
   3990 	}
   3991 }
   3992 
   3993 void
   3994 wm_lan_init_done(struct wm_softc *sc)
   3995 {
   3996 	uint32_t reg = 0;
   3997 	int i;
   3998 
   3999 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4000 		device_xname(sc->sc_dev), __func__));
   4001 
   4002 	/* Wait for eeprom to reload */
   4003 	switch (sc->sc_type) {
   4004 	case WM_T_ICH10:
   4005 	case WM_T_PCH:
   4006 	case WM_T_PCH2:
   4007 	case WM_T_PCH_LPT:
   4008 	case WM_T_PCH_SPT:
   4009 	case WM_T_PCH_CNP:
   4010 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   4011 			reg = CSR_READ(sc, WMREG_STATUS);
   4012 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   4013 				break;
   4014 			delay(100);
   4015 		}
   4016 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   4017 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   4018 			    "complete\n", device_xname(sc->sc_dev), __func__);
   4019 		}
   4020 		break;
   4021 	default:
   4022 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4023 		    __func__);
   4024 		break;
   4025 	}
   4026 
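	/* Clear the LAN_INIT_DONE bit to acknowledge it */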
   4027 	reg &= ~STATUS_LAN_INIT_DONE;
   4028 	CSR_WRITE(sc, WMREG_STATUS, reg);
   4029 }
   4030 
   4031 void
   4032 wm_get_cfg_done(struct wm_softc *sc)
   4033 {
   4034 	int mask;
   4035 	uint32_t reg;
   4036 	int i;
   4037 
   4038 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4039 		device_xname(sc->sc_dev), __func__));
   4040 
   4041 	/* Wait for eeprom to reload */
   4042 	switch (sc->sc_type) {
   4043 	case WM_T_82542_2_0:
   4044 	case WM_T_82542_2_1:
   4045 		/* null */
   4046 		break;
   4047 	case WM_T_82543:
   4048 	case WM_T_82544:
   4049 	case WM_T_82540:
   4050 	case WM_T_82545:
   4051 	case WM_T_82545_3:
   4052 	case WM_T_82546:
   4053 	case WM_T_82546_3:
   4054 	case WM_T_82541:
   4055 	case WM_T_82541_2:
   4056 	case WM_T_82547:
   4057 	case WM_T_82547_2:
   4058 	case WM_T_82573:
   4059 	case WM_T_82574:
   4060 	case WM_T_82583:
   4061 		/* generic */
   4062 		delay(10*1000);
   4063 		break;
   4064 	case WM_T_80003:
   4065 	case WM_T_82571:
   4066 	case WM_T_82572:
   4067 	case WM_T_82575:
   4068 	case WM_T_82576:
   4069 	case WM_T_82580:
   4070 	case WM_T_I350:
   4071 	case WM_T_I354:
   4072 	case WM_T_I210:
   4073 	case WM_T_I211:
   4074 		if (sc->sc_type == WM_T_82571) {
4075 			/* Only the 82571 shares the port 0 CFGDONE bit */
   4076 			mask = EEMNGCTL_CFGDONE_0;
   4077 		} else
   4078 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   4079 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   4080 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   4081 				break;
   4082 			delay(1000);
   4083 		}
   4084 		if (i >= WM_PHY_CFG_TIMEOUT)
   4085 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   4086 				device_xname(sc->sc_dev), __func__));
   4087 		break;
   4088 	case WM_T_ICH8:
   4089 	case WM_T_ICH9:
   4090 	case WM_T_ICH10:
   4091 	case WM_T_PCH:
   4092 	case WM_T_PCH2:
   4093 	case WM_T_PCH_LPT:
   4094 	case WM_T_PCH_SPT:
   4095 	case WM_T_PCH_CNP:
   4096 		delay(10*1000);
   4097 		if (sc->sc_type >= WM_T_ICH10)
   4098 			wm_lan_init_done(sc);
   4099 		else
   4100 			wm_get_auto_rd_done(sc);
   4101 
   4102 		/* Clear PHY Reset Asserted bit */
   4103 		reg = CSR_READ(sc, WMREG_STATUS);
   4104 		if ((reg & STATUS_PHYRA) != 0)
   4105 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4106 		break;
   4107 	default:
   4108 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4109 		    __func__);
   4110 		break;
   4111 	}
   4112 }
   4113 
   4114 int
   4115 wm_phy_post_reset(struct wm_softc *sc)
   4116 {
   4117 	device_t dev = sc->sc_dev;
   4118 	uint16_t reg;
   4119 	int rv = 0;
   4120 
   4121 	/* This function is only for ICH8 and newer. */
   4122 	if (sc->sc_type < WM_T_ICH8)
   4123 		return 0;
   4124 
   4125 	if (wm_phy_resetisblocked(sc)) {
   4126 		/* XXX */
   4127 		device_printf(dev, "PHY is blocked\n");
   4128 		return -1;
   4129 	}
   4130 
   4131 	/* Allow time for h/w to get to quiescent state after reset */
   4132 	delay(10*1000);
   4133 
   4134 	/* Perform any necessary post-reset workarounds */
   4135 	if (sc->sc_type == WM_T_PCH)
   4136 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4137 	else if (sc->sc_type == WM_T_PCH2)
   4138 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4139 	if (rv != 0)
   4140 		return rv;
   4141 
   4142 	/* Clear the host wakeup bit after lcd reset */
   4143 	if (sc->sc_type >= WM_T_PCH) {
   4144 		wm_gmii_hv_readreg(dev, 2, BM_PORT_GEN_CFG, &reg);
   4145 		reg &= ~BM_WUC_HOST_WU_BIT;
   4146 		wm_gmii_hv_writereg(dev, 2, BM_PORT_GEN_CFG, reg);
   4147 	}
   4148 
   4149 	/* Configure the LCD with the extended configuration region in NVM */
   4150 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4151 		return rv;
   4152 
   4153 	/* Configure the LCD with the OEM bits in NVM */
   4154 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4155 
   4156 	if (sc->sc_type == WM_T_PCH2) {
   4157 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4158 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4159 			delay(10 * 1000);
   4160 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4161 		}
   4162 		/* Set EEE LPI Update Timer to 200usec */
   4163 		rv = sc->phy.acquire(sc);
   4164 		if (rv)
   4165 			return rv;
   4166 		rv = wm_write_emi_reg_locked(dev,
   4167 		    I82579_LPI_UPDATE_TIMER, 0x1387);
   4168 		sc->phy.release(sc);
   4169 	}
   4170 
   4171 	return rv;
   4172 }
   4173 
   4174 /* Only for PCH and newer */
   4175 static int
   4176 wm_write_smbus_addr(struct wm_softc *sc)
   4177 {
   4178 	uint32_t strap, freq;
   4179 	uint16_t phy_data;
   4180 	int rv;
   4181 
   4182 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4183 		device_xname(sc->sc_dev), __func__));
   4184 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4185 
   4186 	strap = CSR_READ(sc, WMREG_STRAP);
   4187 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4188 
   4189 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4190 	if (rv != 0)
   4191 		return -1;
   4192 
   4193 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4194 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4195 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4196 
   4197 	if (sc->sc_phytype == WMPHY_I217) {
4198 		/* Restore SMBus frequency; a strap value of 0 is unsupported */
4199 		if (freq--) {
   4200 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4201 			    | HV_SMB_ADDR_FREQ_HIGH);
   4202 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4203 			    HV_SMB_ADDR_FREQ_LOW);
   4204 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4205 			    HV_SMB_ADDR_FREQ_HIGH);
   4206 		} else
   4207 			DPRINTF(WM_DEBUG_INIT,
   4208 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4209 				device_xname(sc->sc_dev), __func__));
   4210 	}
   4211 
   4212 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4213 	    phy_data);
   4214 }
   4215 
   4216 static int
   4217 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4218 {
   4219 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4220 	uint16_t phy_page = 0;
   4221 	int rv = 0;
   4222 
   4223 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4224 		device_xname(sc->sc_dev), __func__));
   4225 
   4226 	switch (sc->sc_type) {
   4227 	case WM_T_ICH8:
   4228 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4229 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4230 			return 0;
   4231 
   4232 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4233 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4234 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4235 			break;
   4236 		}
   4237 		/* FALLTHROUGH */
   4238 	case WM_T_PCH:
   4239 	case WM_T_PCH2:
   4240 	case WM_T_PCH_LPT:
   4241 	case WM_T_PCH_SPT:
   4242 	case WM_T_PCH_CNP:
   4243 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4244 		break;
   4245 	default:
   4246 		return 0;
   4247 	}
   4248 
   4249 	if ((rv = sc->phy.acquire(sc)) != 0)
   4250 		return rv;
   4251 
   4252 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4253 	if ((reg & sw_cfg_mask) == 0)
   4254 		goto release;
   4255 
   4256 	/*
   4257 	 * Make sure HW does not configure LCD from PHY extended configuration
   4258 	 * before SW configuration
   4259 	 */
   4260 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4261 	if ((sc->sc_type < WM_T_PCH2)
   4262 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4263 		goto release;
   4264 
   4265 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4266 		device_xname(sc->sc_dev), __func__));
   4267 	/* word_addr is in DWORD */
   4268 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4269 
   4270 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4271 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4272 	if (cnf_size == 0)
   4273 		goto release;
   4274 
   4275 	if (((sc->sc_type == WM_T_PCH)
   4276 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4277 	    || (sc->sc_type > WM_T_PCH)) {
   4278 		/*
   4279 		 * HW configures the SMBus address and LEDs when the OEM and
   4280 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4281 		 * are cleared, SW will configure them instead.
   4282 		 */
   4283 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4284 			device_xname(sc->sc_dev), __func__));
   4285 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4286 			goto release;
   4287 
   4288 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4289 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4290 		    (uint16_t)reg);
   4291 		if (rv != 0)
   4292 			goto release;
   4293 	}
   4294 
   4295 	/* Configure LCD from extended configuration region. */
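	/* Each entry is a pair of 16-bit words: data first, then address */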
   4296 	for (i = 0; i < cnf_size; i++) {
   4297 		uint16_t reg_data, reg_addr;
   4298 
   4299 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4300 			goto release;
   4301 
   4302 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4303 			goto release;
   4304 
   4305 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4306 			phy_page = reg_data;
   4307 
   4308 		reg_addr &= IGPHY_MAXREGADDR;
   4309 		reg_addr |= phy_page;
   4310 
   4311 		KASSERT(sc->phy.writereg_locked != NULL);
   4312 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4313 		    reg_data);
   4314 	}
   4315 
   4316 release:
   4317 	sc->phy.release(sc);
   4318 	return rv;
   4319 }
   4320 
   4321 /*
   4322  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4323  *  @sc:       pointer to the HW structure
   4324  *  @d0_state: boolean if entering d0 or d3 device state
   4325  *
   4326  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4327  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4328  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4329  */
   4330 int
   4331 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4332 {
   4333 	uint32_t mac_reg;
   4334 	uint16_t oem_reg;
   4335 	int rv;
   4336 
   4337 	if (sc->sc_type < WM_T_PCH)
   4338 		return 0;
   4339 
   4340 	rv = sc->phy.acquire(sc);
   4341 	if (rv != 0)
   4342 		return rv;
   4343 
   4344 	if (sc->sc_type == WM_T_PCH) {
   4345 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4346 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4347 			goto release;
   4348 	}
   4349 
   4350 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4351 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4352 		goto release;
   4353 
   4354 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4355 
   4356 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4357 	if (rv != 0)
   4358 		goto release;
   4359 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4360 
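	/*
	 * Mirror the MAC's PHY_CTRL GbE-disable and LPLU settings into
	 * the PHY's OEM bits for the requested power state.
	 */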
   4361 	if (d0_state) {
   4362 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4363 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4364 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4365 			oem_reg |= HV_OEM_BITS_LPLU;
   4366 	} else {
   4367 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4368 		    != 0)
   4369 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4370 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4371 		    != 0)
   4372 			oem_reg |= HV_OEM_BITS_LPLU;
   4373 	}
   4374 
   4375 	/* Set Restart auto-neg to activate the bits */
   4376 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4377 	    && (wm_phy_resetisblocked(sc) == false))
   4378 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4379 
   4380 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4381 
   4382 release:
   4383 	sc->phy.release(sc);
   4384 
   4385 	return rv;
   4386 }
   4387 
   4388 /* Init hardware bits */
   4389 void
   4390 wm_initialize_hardware_bits(struct wm_softc *sc)
   4391 {
   4392 	uint32_t tarc0, tarc1, reg;
   4393 
   4394 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4395 		device_xname(sc->sc_dev), __func__));
   4396 
   4397 	/* For 82571 variant, 80003 and ICHs */
   4398 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4399 	    || (sc->sc_type >= WM_T_80003)) {
   4400 
   4401 		/* Transmit Descriptor Control 0 */
   4402 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4403 		reg |= TXDCTL_COUNT_DESC;
   4404 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4405 
   4406 		/* Transmit Descriptor Control 1 */
   4407 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4408 		reg |= TXDCTL_COUNT_DESC;
   4409 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4410 
   4411 		/* TARC0 */
   4412 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4413 		switch (sc->sc_type) {
   4414 		case WM_T_82571:
   4415 		case WM_T_82572:
   4416 		case WM_T_82573:
   4417 		case WM_T_82574:
   4418 		case WM_T_82583:
   4419 		case WM_T_80003:
   4420 			/* Clear bits 30..27 */
   4421 			tarc0 &= ~__BITS(30, 27);
   4422 			break;
   4423 		default:
   4424 			break;
   4425 		}
   4426 
   4427 		switch (sc->sc_type) {
   4428 		case WM_T_82571:
   4429 		case WM_T_82572:
   4430 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4431 
   4432 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4433 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4434 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4435 			/* 8257[12] Errata No.7 */
4436 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4437 
   4438 			/* TARC1 bit 28 */
   4439 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4440 				tarc1 &= ~__BIT(28);
   4441 			else
   4442 				tarc1 |= __BIT(28);
   4443 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4444 
   4445 			/*
   4446 			 * 8257[12] Errata No.13
4447 			 * Disable Dynamic Clock Gating.
   4448 			 */
   4449 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4450 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4451 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4452 			break;
   4453 		case WM_T_82573:
   4454 		case WM_T_82574:
   4455 		case WM_T_82583:
   4456 			if ((sc->sc_type == WM_T_82574)
   4457 			    || (sc->sc_type == WM_T_82583))
   4458 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4459 
   4460 			/* Extended Device Control */
   4461 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4462 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4463 			reg |= __BIT(22);	/* Set bit 22 */
   4464 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4465 
   4466 			/* Device Control */
   4467 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4468 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4469 
   4470 			/* PCIe Control Register */
   4471 			/*
   4472 			 * 82573 Errata (unknown).
   4473 			 *
   4474 			 * 82574 Errata 25 and 82583 Errata 12
   4475 			 * "Dropped Rx Packets":
    4476 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4477 			 */
   4478 			reg = CSR_READ(sc, WMREG_GCR);
   4479 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4480 			CSR_WRITE(sc, WMREG_GCR, reg);
   4481 
   4482 			if ((sc->sc_type == WM_T_82574)
   4483 			    || (sc->sc_type == WM_T_82583)) {
   4484 				/*
   4485 				 * Document says this bit must be set for
   4486 				 * proper operation.
   4487 				 */
   4488 				reg = CSR_READ(sc, WMREG_GCR);
   4489 				reg |= __BIT(22);
   4490 				CSR_WRITE(sc, WMREG_GCR, reg);
   4491 
   4492 				/*
    4493 				 * Apply a workaround for a hardware erratum
    4494 				 * documented in the errata docs. It fixes an
    4495 				 * issue where error-prone or unreliable PCIe
    4496 				 * completions occur, particularly with ASPM
    4497 				 * enabled. Without the fix, the issue can
    4498 				 * cause Tx timeouts.
   4499 				 */
   4500 				reg = CSR_READ(sc, WMREG_GCR2);
   4501 				reg |= __BIT(0);
   4502 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4503 			}
   4504 			break;
   4505 		case WM_T_80003:
   4506 			/* TARC0 */
   4507 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4508 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4509 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4510 
   4511 			/* TARC1 bit 28 */
   4512 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4513 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4514 				tarc1 &= ~__BIT(28);
   4515 			else
   4516 				tarc1 |= __BIT(28);
   4517 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4518 			break;
   4519 		case WM_T_ICH8:
   4520 		case WM_T_ICH9:
   4521 		case WM_T_ICH10:
   4522 		case WM_T_PCH:
   4523 		case WM_T_PCH2:
   4524 		case WM_T_PCH_LPT:
   4525 		case WM_T_PCH_SPT:
   4526 		case WM_T_PCH_CNP:
   4527 			/* TARC0 */
   4528 			if (sc->sc_type == WM_T_ICH8) {
   4529 				/* Set TARC0 bits 29 and 28 */
   4530 				tarc0 |= __BITS(29, 28);
   4531 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4532 				tarc0 |= __BIT(29);
   4533 				/*
    4534 				 * Drop bit 28 (from Linux). See the
    4535 				 * I218/I219 spec update
   4536 				 * "5. Buffer Overrun While the I219 is
   4537 				 * Processing DMA Transactions"
   4538 				 */
   4539 				tarc0 &= ~__BIT(28);
   4540 			}
   4541 			/* Set TARC0 bits 23,24,26,27 */
   4542 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4543 
   4544 			/* CTRL_EXT */
   4545 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4546 			reg |= __BIT(22);	/* Set bit 22 */
   4547 			/*
   4548 			 * Enable PHY low-power state when MAC is at D3
   4549 			 * w/o WoL
   4550 			 */
   4551 			if (sc->sc_type >= WM_T_PCH)
   4552 				reg |= CTRL_EXT_PHYPDEN;
   4553 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4554 
   4555 			/* TARC1 */
   4556 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4557 			/* bit 28 */
   4558 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4559 				tarc1 &= ~__BIT(28);
   4560 			else
   4561 				tarc1 |= __BIT(28);
   4562 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4563 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4564 
   4565 			/* Device Status */
   4566 			if (sc->sc_type == WM_T_ICH8) {
   4567 				reg = CSR_READ(sc, WMREG_STATUS);
   4568 				reg &= ~__BIT(31);
   4569 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4570 
   4571 			}
   4572 
   4573 			/* IOSFPC */
   4574 			if (sc->sc_type == WM_T_PCH_SPT) {
   4575 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4576 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4577 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4578 			}
   4579 			/*
    4580 			 * To work around a descriptor data corruption issue
    4581 			 * with NFSv2 UDP traffic, just disable the NFS
    4582 			 * filtering capability.
   4583 			 */
   4584 			reg = CSR_READ(sc, WMREG_RFCTL);
   4585 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4586 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4587 			break;
   4588 		default:
   4589 			break;
   4590 		}
   4591 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4592 
   4593 		switch (sc->sc_type) {
   4594 		/*
   4595 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4596 		 * Avoid RSS Hash Value bug.
   4597 		 */
   4598 		case WM_T_82571:
   4599 		case WM_T_82572:
   4600 		case WM_T_82573:
   4601 		case WM_T_80003:
   4602 		case WM_T_ICH8:
   4603 			reg = CSR_READ(sc, WMREG_RFCTL);
    4604 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4605 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4606 			break;
   4607 		case WM_T_82574:
    4608 			/* Use extended Rx descriptors. */
   4609 			reg = CSR_READ(sc, WMREG_RFCTL);
   4610 			reg |= WMREG_RFCTL_EXSTEN;
   4611 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4612 			break;
   4613 		default:
   4614 			break;
   4615 		}
   4616 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4617 		/*
   4618 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4619 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4620 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4621 		 * Correctly by the Device"
   4622 		 *
   4623 		 * I354(C2000) Errata AVR53:
   4624 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4625 		 * Hang"
   4626 		 */
   4627 		reg = CSR_READ(sc, WMREG_RFCTL);
   4628 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4629 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4630 	}
   4631 }
   4632 
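         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Translate the encoded RXPBS packet buffer size through
          *	a lookup table; values outside the table map to 0.
          */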
   4633 static uint32_t
   4634 wm_rxpbs_adjust_82580(uint32_t val)
   4635 {
   4636 	uint32_t rv = 0;
   4637 
   4638 	if (val < __arraycount(wm_82580_rxpbs_table))
   4639 		rv = wm_82580_rxpbs_table[val];
   4640 
   4641 	return rv;
   4642 }
   4643 
   4644 /*
   4645  * wm_reset_phy:
   4646  *
    4647  *	Generic PHY reset function.
   4648  *	Same as e1000_phy_hw_reset_generic()
   4649  */
   4650 static int
   4651 wm_reset_phy(struct wm_softc *sc)
   4652 {
   4653 	uint32_t reg;
   4654 
   4655 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4656 		device_xname(sc->sc_dev), __func__));
   4657 	if (wm_phy_resetisblocked(sc))
   4658 		return -1;
   4659 
   4660 	sc->phy.acquire(sc);
   4661 
   4662 	reg = CSR_READ(sc, WMREG_CTRL);
   4663 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4664 	CSR_WRITE_FLUSH(sc);
   4665 
   4666 	delay(sc->phy.reset_delay_us);
   4667 
   4668 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4669 	CSR_WRITE_FLUSH(sc);
   4670 
   4671 	delay(150);
   4672 
   4673 	sc->phy.release(sc);
   4674 
   4675 	wm_get_cfg_done(sc);
   4676 	wm_phy_post_reset(sc);
   4677 
   4678 	return 0;
   4679 }
   4680 
   4681 /*
    4682  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
    4683  * so it is enough to check only sc->sc_queue[0].
   4684  */
   4685 static void
   4686 wm_flush_desc_rings(struct wm_softc *sc)
   4687 {
   4688 	pcireg_t preg;
   4689 	uint32_t reg;
   4690 	struct wm_txqueue *txq;
   4691 	wiseman_txdesc_t *txd;
   4692 	int nexttx;
   4693 	uint32_t rctl;
   4694 
   4695 	/* First, disable MULR fix in FEXTNVM11 */
   4696 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4697 	reg |= FEXTNVM11_DIS_MULRFIX;
   4698 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4699 
   4700 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4701 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4702 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4703 		return;
   4704 
   4705 	/* TX */
   4706 	device_printf(sc->sc_dev, "Need TX flush (reg = %08x, len = %u)\n",
   4707 	    preg, reg);
   4708 	reg = CSR_READ(sc, WMREG_TCTL);
   4709 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4710 
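         	/*
         	 * Queue one dummy 512-byte descriptor so the hardware has
         	 * work to do, letting it drain the stuck Tx ring.
         	 */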
   4711 	txq = &sc->sc_queue[0].wmq_txq;
   4712 	nexttx = txq->txq_next;
   4713 	txd = &txq->txq_descs[nexttx];
   4714 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4715 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4716 	txd->wtx_fields.wtxu_status = 0;
   4717 	txd->wtx_fields.wtxu_options = 0;
   4718 	txd->wtx_fields.wtxu_vlan = 0;
   4719 
   4720 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4721 	    BUS_SPACE_BARRIER_WRITE);
   4722 
   4723 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4724 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4725 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4726 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4727 	delay(250);
   4728 
   4729 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4730 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4731 		return;
   4732 
   4733 	/* RX */
   4734 	device_printf(sc->sc_dev, "Need RX flush (reg = %08x)\n", preg);
   4735 	rctl = CSR_READ(sc, WMREG_RCTL);
   4736 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4737 	CSR_WRITE_FLUSH(sc);
   4738 	delay(150);
   4739 
   4740 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4741 	/* Zero the lower 14 bits (prefetch and host thresholds) */
   4742 	reg &= 0xffffc000;
   4743 	/*
   4744 	 * Update thresholds: prefetch threshold to 31, host threshold
   4745 	 * to 1 and make sure the granularity is "descriptors" and not
   4746 	 * "cache lines"
   4747 	 */
   4748 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
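         	/* 0x1f is PTHRESH (bits 5:0) = 31; 1 << 8 is HTHRESH (bits 13:8) = 1. */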
   4749 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4750 
   4751 	/* Momentarily enable the RX ring for the changes to take effect */
   4752 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4753 	CSR_WRITE_FLUSH(sc);
   4754 	delay(150);
   4755 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4756 }
   4757 
   4758 /*
   4759  * wm_reset:
   4760  *
   4761  *	Reset the i82542 chip.
   4762  */
   4763 static void
   4764 wm_reset(struct wm_softc *sc)
   4765 {
   4766 	int phy_reset = 0;
   4767 	int i, error = 0;
   4768 	uint32_t reg;
   4769 	uint16_t kmreg;
   4770 	int rv;
   4771 
   4772 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4773 		device_xname(sc->sc_dev), __func__));
   4774 	KASSERT(sc->sc_type != 0);
   4775 
   4776 	/*
   4777 	 * Allocate on-chip memory according to the MTU size.
   4778 	 * The Packet Buffer Allocation register must be written
   4779 	 * before the chip is reset.
   4780 	 */
   4781 	switch (sc->sc_type) {
   4782 	case WM_T_82547:
   4783 	case WM_T_82547_2:
   4784 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4785 		    PBA_22K : PBA_30K;
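         		/*
         		 * The on-chip packet buffer is split: the low part
         		 * (sc_pba) is used for Rx, and the remainder up to
         		 * PBA_40K becomes the Tx FIFO used by the 82547 Tx
         		 * stall workaround.
         		 */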
   4786 		for (i = 0; i < sc->sc_nqueues; i++) {
   4787 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4788 			txq->txq_fifo_head = 0;
   4789 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4790 			txq->txq_fifo_size =
   4791 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4792 			txq->txq_fifo_stall = 0;
   4793 		}
   4794 		break;
   4795 	case WM_T_82571:
   4796 	case WM_T_82572:
    4797 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4798 	case WM_T_80003:
   4799 		sc->sc_pba = PBA_32K;
   4800 		break;
   4801 	case WM_T_82573:
   4802 		sc->sc_pba = PBA_12K;
   4803 		break;
   4804 	case WM_T_82574:
   4805 	case WM_T_82583:
   4806 		sc->sc_pba = PBA_20K;
   4807 		break;
   4808 	case WM_T_82576:
   4809 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4810 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4811 		break;
   4812 	case WM_T_82580:
   4813 	case WM_T_I350:
   4814 	case WM_T_I354:
   4815 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4816 		break;
   4817 	case WM_T_I210:
   4818 	case WM_T_I211:
   4819 		sc->sc_pba = PBA_34K;
   4820 		break;
   4821 	case WM_T_ICH8:
   4822 		/* Workaround for a bit corruption issue in FIFO memory */
   4823 		sc->sc_pba = PBA_8K;
   4824 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4825 		break;
   4826 	case WM_T_ICH9:
   4827 	case WM_T_ICH10:
   4828 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4829 		    PBA_14K : PBA_10K;
   4830 		break;
   4831 	case WM_T_PCH:
   4832 	case WM_T_PCH2:	/* XXX 14K? */
   4833 	case WM_T_PCH_LPT:
   4834 	case WM_T_PCH_SPT:
   4835 	case WM_T_PCH_CNP:
   4836 		sc->sc_pba = PBA_26K;
   4837 		break;
   4838 	default:
   4839 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4840 		    PBA_40K : PBA_48K;
   4841 		break;
   4842 	}
   4843 	/*
   4844 	 * Only old or non-multiqueue devices have the PBA register
   4845 	 * XXX Need special handling for 82575.
   4846 	 */
   4847 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4848 	    || (sc->sc_type == WM_T_82575))
   4849 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4850 
   4851 	/* Prevent the PCI-E bus from sticking */
   4852 	if (sc->sc_flags & WM_F_PCIE) {
   4853 		int timeout = 800;
   4854 
   4855 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4856 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4857 
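         		/* Poll up to 800 * 100us = 80ms for master requests to finish. */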
   4858 		while (timeout--) {
   4859 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4860 			    == 0)
   4861 				break;
   4862 			delay(100);
   4863 		}
   4864 		if (timeout == 0)
   4865 			device_printf(sc->sc_dev,
   4866 			    "failed to disable busmastering\n");
   4867 	}
   4868 
    4869 	/* Set the completion timeout for the interface */
   4870 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4871 	    || (sc->sc_type == WM_T_82580)
   4872 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4873 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4874 		wm_set_pcie_completion_timeout(sc);
   4875 
   4876 	/* Clear interrupt */
   4877 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4878 	if (wm_is_using_msix(sc)) {
   4879 		if (sc->sc_type != WM_T_82574) {
   4880 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4881 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4882 		} else
   4883 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4884 	}
   4885 
   4886 	/* Stop the transmit and receive processes. */
   4887 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4888 	sc->sc_rctl &= ~RCTL_EN;
   4889 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4890 	CSR_WRITE_FLUSH(sc);
   4891 
   4892 	/* XXX set_tbi_sbp_82543() */
   4893 
   4894 	delay(10*1000);
   4895 
   4896 	/* Must acquire the MDIO ownership before MAC reset */
   4897 	switch (sc->sc_type) {
   4898 	case WM_T_82573:
   4899 	case WM_T_82574:
   4900 	case WM_T_82583:
   4901 		error = wm_get_hw_semaphore_82573(sc);
   4902 		break;
   4903 	default:
   4904 		break;
   4905 	}
   4906 
   4907 	/*
   4908 	 * 82541 Errata 29? & 82547 Errata 28?
   4909 	 * See also the description about PHY_RST bit in CTRL register
   4910 	 * in 8254x_GBe_SDM.pdf.
   4911 	 */
   4912 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4913 		CSR_WRITE(sc, WMREG_CTRL,
   4914 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4915 		CSR_WRITE_FLUSH(sc);
   4916 		delay(5000);
   4917 	}
   4918 
   4919 	switch (sc->sc_type) {
   4920 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4921 	case WM_T_82541:
   4922 	case WM_T_82541_2:
   4923 	case WM_T_82547:
   4924 	case WM_T_82547_2:
   4925 		/*
   4926 		 * On some chipsets, a reset through a memory-mapped write
   4927 		 * cycle can cause the chip to reset before completing the
    4928 		 * write cycle. This causes major headaches that can be avoided
   4929 		 * by issuing the reset via indirect register writes through
   4930 		 * I/O space.
   4931 		 *
   4932 		 * So, if we successfully mapped the I/O BAR at attach time,
   4933 		 * use that. Otherwise, try our luck with a memory-mapped
   4934 		 * reset.
   4935 		 */
   4936 		if (sc->sc_flags & WM_F_IOH_VALID)
   4937 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4938 		else
   4939 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4940 		break;
   4941 	case WM_T_82545_3:
   4942 	case WM_T_82546_3:
   4943 		/* Use the shadow control register on these chips. */
   4944 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4945 		break;
   4946 	case WM_T_80003:
   4947 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4948 		sc->phy.acquire(sc);
   4949 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4950 		sc->phy.release(sc);
   4951 		break;
   4952 	case WM_T_ICH8:
   4953 	case WM_T_ICH9:
   4954 	case WM_T_ICH10:
   4955 	case WM_T_PCH:
   4956 	case WM_T_PCH2:
   4957 	case WM_T_PCH_LPT:
   4958 	case WM_T_PCH_SPT:
   4959 	case WM_T_PCH_CNP:
   4960 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4961 		if (wm_phy_resetisblocked(sc) == false) {
   4962 			/*
   4963 			 * Gate automatic PHY configuration by hardware on
   4964 			 * non-managed 82579
   4965 			 */
   4966 			if ((sc->sc_type == WM_T_PCH2)
   4967 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4968 				== 0))
   4969 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4970 
   4971 			reg |= CTRL_PHY_RESET;
   4972 			phy_reset = 1;
   4973 		} else
   4974 			device_printf(sc->sc_dev, "XXX reset is blocked!!!\n");
   4975 		sc->phy.acquire(sc);
   4976 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4977 		/* Don't insert a completion barrier during reset */
   4978 		delay(20*1000);
   4979 		mutex_exit(sc->sc_ich_phymtx);
   4980 		break;
   4981 	case WM_T_82580:
   4982 	case WM_T_I350:
   4983 	case WM_T_I354:
   4984 	case WM_T_I210:
   4985 	case WM_T_I211:
   4986 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4987 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4988 			CSR_WRITE_FLUSH(sc);
   4989 		delay(5000);
   4990 		break;
   4991 	case WM_T_82542_2_0:
   4992 	case WM_T_82542_2_1:
   4993 	case WM_T_82543:
   4994 	case WM_T_82540:
   4995 	case WM_T_82545:
   4996 	case WM_T_82546:
   4997 	case WM_T_82571:
   4998 	case WM_T_82572:
   4999 	case WM_T_82573:
   5000 	case WM_T_82574:
   5001 	case WM_T_82575:
   5002 	case WM_T_82576:
   5003 	case WM_T_82583:
   5004 	default:
   5005 		/* Everything else can safely use the documented method. */
   5006 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   5007 		break;
   5008 	}
   5009 
   5010 	/* Must release the MDIO ownership after MAC reset */
   5011 	switch (sc->sc_type) {
   5012 	case WM_T_82573:
   5013 	case WM_T_82574:
   5014 	case WM_T_82583:
   5015 		if (error == 0)
   5016 			wm_put_hw_semaphore_82573(sc);
   5017 		break;
   5018 	default:
   5019 		break;
   5020 	}
   5021 
   5022 	/* Set Phy Config Counter to 50msec */
   5023 	if (sc->sc_type == WM_T_PCH2) {
   5024 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   5025 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   5026 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   5027 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   5028 	}
   5029 
   5030 	if (phy_reset != 0)
   5031 		wm_get_cfg_done(sc);
   5032 
   5033 	/* Reload EEPROM */
   5034 	switch (sc->sc_type) {
   5035 	case WM_T_82542_2_0:
   5036 	case WM_T_82542_2_1:
   5037 	case WM_T_82543:
   5038 	case WM_T_82544:
   5039 		delay(10);
   5040 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5041 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5042 		CSR_WRITE_FLUSH(sc);
   5043 		delay(2000);
   5044 		break;
   5045 	case WM_T_82540:
   5046 	case WM_T_82545:
   5047 	case WM_T_82545_3:
   5048 	case WM_T_82546:
   5049 	case WM_T_82546_3:
   5050 		delay(5*1000);
   5051 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5052 		break;
   5053 	case WM_T_82541:
   5054 	case WM_T_82541_2:
   5055 	case WM_T_82547:
   5056 	case WM_T_82547_2:
   5057 		delay(20000);
   5058 		/* XXX Disable HW ARPs on ASF enabled adapters */
   5059 		break;
   5060 	case WM_T_82571:
   5061 	case WM_T_82572:
   5062 	case WM_T_82573:
   5063 	case WM_T_82574:
   5064 	case WM_T_82583:
   5065 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   5066 			delay(10);
   5067 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   5068 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5069 			CSR_WRITE_FLUSH(sc);
   5070 		}
   5071 		/* check EECD_EE_AUTORD */
   5072 		wm_get_auto_rd_done(sc);
   5073 		/*
   5074 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   5075 		 * is set.
   5076 		 */
   5077 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   5078 		    || (sc->sc_type == WM_T_82583))
   5079 			delay(25*1000);
   5080 		break;
   5081 	case WM_T_82575:
   5082 	case WM_T_82576:
   5083 	case WM_T_82580:
   5084 	case WM_T_I350:
   5085 	case WM_T_I354:
   5086 	case WM_T_I210:
   5087 	case WM_T_I211:
   5088 	case WM_T_80003:
   5089 		/* check EECD_EE_AUTORD */
   5090 		wm_get_auto_rd_done(sc);
   5091 		break;
   5092 	case WM_T_ICH8:
   5093 	case WM_T_ICH9:
   5094 	case WM_T_ICH10:
   5095 	case WM_T_PCH:
   5096 	case WM_T_PCH2:
   5097 	case WM_T_PCH_LPT:
   5098 	case WM_T_PCH_SPT:
   5099 	case WM_T_PCH_CNP:
   5100 		break;
   5101 	default:
   5102 		panic("%s: unknown type\n", __func__);
   5103 	}
   5104 
   5105 	/* Check whether EEPROM is present or not */
   5106 	switch (sc->sc_type) {
   5107 	case WM_T_82575:
   5108 	case WM_T_82576:
   5109 	case WM_T_82580:
   5110 	case WM_T_I350:
   5111 	case WM_T_I354:
   5112 	case WM_T_ICH8:
   5113 	case WM_T_ICH9:
   5114 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5115 			/* Not found */
   5116 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5117 			if (sc->sc_type == WM_T_82575)
   5118 				wm_reset_init_script_82575(sc);
   5119 		}
   5120 		break;
   5121 	default:
   5122 		break;
   5123 	}
   5124 
   5125 	if (phy_reset != 0)
   5126 		wm_phy_post_reset(sc);
   5127 
   5128 	if ((sc->sc_type == WM_T_82580)
   5129 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5130 		/* Clear global device reset status bit */
   5131 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5132 	}
   5133 
   5134 	/* Clear any pending interrupt events. */
   5135 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5136 	reg = CSR_READ(sc, WMREG_ICR);
   5137 	if (wm_is_using_msix(sc)) {
   5138 		if (sc->sc_type != WM_T_82574) {
   5139 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5140 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5141 		} else
   5142 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5143 	}
   5144 
   5145 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5146 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5147 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5148 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5149 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5150 		reg |= KABGTXD_BGSQLBIAS;
   5151 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5152 	}
   5153 
   5154 	/* Reload sc_ctrl */
   5155 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5156 
   5157 	wm_set_eee(sc);
   5158 
   5159 	/*
   5160 	 * For PCH, this write will make sure that any noise will be detected
   5161 	 * as a CRC error and be dropped rather than show up as a bad packet
   5162 	 * to the DMA engine
   5163 	 */
   5164 	if (sc->sc_type == WM_T_PCH)
   5165 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5166 
   5167 	if (sc->sc_type >= WM_T_82544)
   5168 		CSR_WRITE(sc, WMREG_WUC, 0);
   5169 
   5170 	if (sc->sc_type < WM_T_82575)
   5171 		wm_disable_aspm(sc); /* Workaround for some chips */
   5172 
   5173 	wm_reset_mdicnfg_82580(sc);
   5174 
   5175 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5176 		wm_pll_workaround_i210(sc);
   5177 
   5178 	if (sc->sc_type == WM_T_80003) {
   5179 		/* Default to TRUE to enable the MDIC W/A */
   5180 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5181 
   5182 		rv = wm_kmrn_readreg(sc,
   5183 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5184 		if (rv == 0) {
   5185 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5186 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5187 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5188 			else
   5189 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5190 		}
   5191 	}
   5192 }
   5193 
   5194 /*
   5195  * wm_add_rxbuf:
   5196  *
    5197  *	Add a receive buffer to the indicated descriptor.
   5198  */
   5199 static int
   5200 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5201 {
   5202 	struct wm_softc *sc = rxq->rxq_sc;
   5203 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5204 	struct mbuf *m;
   5205 	int error;
   5206 
   5207 	KASSERT(mutex_owned(rxq->rxq_lock));
   5208 
   5209 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5210 	if (m == NULL)
   5211 		return ENOBUFS;
   5212 
   5213 	MCLGET(m, M_DONTWAIT);
   5214 	if ((m->m_flags & M_EXT) == 0) {
   5215 		m_freem(m);
   5216 		return ENOBUFS;
   5217 	}
   5218 
   5219 	if (rxs->rxs_mbuf != NULL)
   5220 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5221 
   5222 	rxs->rxs_mbuf = m;
   5223 
   5224 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5225 	/*
   5226 	 * Cannot use bus_dmamap_load_mbuf() here because m_data may be
   5227 	 * sc_align_tweak'd between bus_dmamap_load() and bus_dmamap_sync().
   5228 	 */
   5229 	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, m->m_ext.ext_buf,
   5230 	    m->m_ext.ext_size, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
   5231 	if (error) {
   5232 		/* XXX XXX XXX */
   5233 		aprint_error_dev(sc->sc_dev,
   5234 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5235 		panic("wm_add_rxbuf");
   5236 	}
   5237 
   5238 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5239 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5240 
   5241 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5242 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5243 			wm_init_rxdesc(rxq, idx);
   5244 	} else
   5245 		wm_init_rxdesc(rxq, idx);
   5246 
   5247 	return 0;
   5248 }
   5249 
   5250 /*
   5251  * wm_rxdrain:
   5252  *
   5253  *	Drain the receive queue.
   5254  */
   5255 static void
   5256 wm_rxdrain(struct wm_rxqueue *rxq)
   5257 {
   5258 	struct wm_softc *sc = rxq->rxq_sc;
   5259 	struct wm_rxsoft *rxs;
   5260 	int i;
   5261 
   5262 	KASSERT(mutex_owned(rxq->rxq_lock));
   5263 
   5264 	for (i = 0; i < WM_NRXDESC; i++) {
   5265 		rxs = &rxq->rxq_soft[i];
   5266 		if (rxs->rxs_mbuf != NULL) {
   5267 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5268 			m_freem(rxs->rxs_mbuf);
   5269 			rxs->rxs_mbuf = NULL;
   5270 		}
   5271 	}
   5272 }
   5273 
   5274 /*
   5275  * Setup registers for RSS.
   5276  *
   5277  * XXX not yet VMDq support
   5278  */
   5279 static void
   5280 wm_init_rss(struct wm_softc *sc)
   5281 {
   5282 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5283 	int i;
   5284 
   5285 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5286 
   5287 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5288 		unsigned int qid, reta_ent;
   5289 
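         		/* Spread RETA entries over the queues round-robin. */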
   5290 		qid  = i % sc->sc_nqueues;
   5291 		switch (sc->sc_type) {
   5292 		case WM_T_82574:
   5293 			reta_ent = __SHIFTIN(qid,
   5294 			    RETA_ENT_QINDEX_MASK_82574);
   5295 			break;
   5296 		case WM_T_82575:
   5297 			reta_ent = __SHIFTIN(qid,
   5298 			    RETA_ENT_QINDEX1_MASK_82575);
   5299 			break;
   5300 		default:
   5301 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5302 			break;
   5303 		}
   5304 
   5305 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5306 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5307 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5308 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5309 	}
   5310 
   5311 	rss_getkey((uint8_t *)rss_key);
   5312 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5313 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5314 
   5315 	if (sc->sc_type == WM_T_82574)
   5316 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5317 	else
   5318 		mrqc = MRQC_ENABLE_RSS_MQ;
   5319 
   5320 	/*
    5321 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5322 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5323 	 */
   5324 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5325 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5326 #if 0
   5327 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5328 	mrqc |= MRQC_RSS_FIELD_IPV6_UDP_EX;
   5329 #endif
   5330 	mrqc |= MRQC_RSS_FIELD_IPV6_TCP_EX;
   5331 
   5332 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5333 }
   5334 
   5335 /*
    5336  * Adjust the TX and RX queue numbers which the system actually uses.
    5337  *
    5338  * The numbers are affected by the parameters below:
    5339  *     - The number of hardware queues
   5340  *     - The number of MSI-X vectors (= "nvectors" argument)
   5341  *     - ncpu
   5342  */
   5343 static void
   5344 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5345 {
   5346 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5347 
   5348 	if (nvectors < 2) {
   5349 		sc->sc_nqueues = 1;
   5350 		return;
   5351 	}
   5352 
   5353 	switch (sc->sc_type) {
   5354 	case WM_T_82572:
   5355 		hw_ntxqueues = 2;
   5356 		hw_nrxqueues = 2;
   5357 		break;
   5358 	case WM_T_82574:
   5359 		hw_ntxqueues = 2;
   5360 		hw_nrxqueues = 2;
   5361 		break;
   5362 	case WM_T_82575:
   5363 		hw_ntxqueues = 4;
   5364 		hw_nrxqueues = 4;
   5365 		break;
   5366 	case WM_T_82576:
   5367 		hw_ntxqueues = 16;
   5368 		hw_nrxqueues = 16;
   5369 		break;
   5370 	case WM_T_82580:
   5371 	case WM_T_I350:
   5372 	case WM_T_I354:
   5373 		hw_ntxqueues = 8;
   5374 		hw_nrxqueues = 8;
   5375 		break;
   5376 	case WM_T_I210:
   5377 		hw_ntxqueues = 4;
   5378 		hw_nrxqueues = 4;
   5379 		break;
   5380 	case WM_T_I211:
   5381 		hw_ntxqueues = 2;
   5382 		hw_nrxqueues = 2;
   5383 		break;
   5384 		/*
    5385 		 * As the Ethernet controllers below do not support MSI-X,
    5386 		 * this driver does not use multiqueue on them:
   5387 		 *     - WM_T_80003
   5388 		 *     - WM_T_ICH8
   5389 		 *     - WM_T_ICH9
   5390 		 *     - WM_T_ICH10
   5391 		 *     - WM_T_PCH
   5392 		 *     - WM_T_PCH2
   5393 		 *     - WM_T_PCH_LPT
   5394 		 */
   5395 	default:
   5396 		hw_ntxqueues = 1;
   5397 		hw_nrxqueues = 1;
   5398 		break;
   5399 	}
   5400 
   5401 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5402 
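         	/*
         	 * For example, an 82576 (16 hardware queues) with 5 MSI-X
         	 * vectors on an 8-CPU machine ends up with
         	 * min(16, 5 - 1) = 4 queues.
         	 */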
   5403 	/*
    5404 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    5405 	 * the number of queues actually used.
   5406 	 */
   5407 	if (nvectors < hw_nqueues + 1)
   5408 		sc->sc_nqueues = nvectors - 1;
   5409 	else
   5410 		sc->sc_nqueues = hw_nqueues;
   5411 
   5412 	/*
    5413 	 * As more queues than CPUs cannot improve scaling, we limit
    5414 	 * the number of queues actually used.
   5415 	 */
   5416 	if (ncpu < sc->sc_nqueues)
   5417 		sc->sc_nqueues = ncpu;
   5418 }
   5419 
   5420 static inline bool
   5421 wm_is_using_msix(struct wm_softc *sc)
   5422 {
   5423 
   5424 	return (sc->sc_nintrs > 1);
   5425 }
   5426 
   5427 static inline bool
   5428 wm_is_using_multiqueue(struct wm_softc *sc)
   5429 {
   5430 
   5431 	return (sc->sc_nqueues > 1);
   5432 }
   5433 
   5434 static int
   5435 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5436 {
   5437 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5438 
   5439 	wmq->wmq_id = qidx;
   5440 	wmq->wmq_intr_idx = intr_idx;
   5441 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5442 #ifdef WM_MPSAFE
   5443 	    | SOFTINT_MPSAFE
   5444 #endif
   5445 	    , wm_handle_queue, wmq);
   5446 	if (wmq->wmq_si != NULL)
   5447 		return 0;
   5448 
   5449 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5450 	    wmq->wmq_id);
   5451 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5452 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5453 	return ENOMEM;
   5454 }
   5455 
   5456 /*
   5457  * Both single interrupt MSI and INTx can use this function.
   5458  */
   5459 static int
   5460 wm_setup_legacy(struct wm_softc *sc)
   5461 {
   5462 	pci_chipset_tag_t pc = sc->sc_pc;
   5463 	const char *intrstr = NULL;
   5464 	char intrbuf[PCI_INTRSTR_LEN];
   5465 	int error;
   5466 
   5467 	error = wm_alloc_txrx_queues(sc);
   5468 	if (error) {
   5469 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5470 		    error);
   5471 		return ENOMEM;
   5472 	}
   5473 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5474 	    sizeof(intrbuf));
   5475 #ifdef WM_MPSAFE
   5476 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5477 #endif
   5478 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5479 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5480 	if (sc->sc_ihs[0] == NULL) {
   5481 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5482 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5483 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5484 		return ENOMEM;
   5485 	}
   5486 
   5487 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5488 	sc->sc_nintrs = 1;
   5489 
   5490 	return wm_softint_establish(sc, 0, 0);
   5491 }
   5492 
   5493 static int
   5494 wm_setup_msix(struct wm_softc *sc)
   5495 {
   5496 	void *vih;
   5497 	kcpuset_t *affinity;
   5498 	int qidx, error, intr_idx, txrx_established;
   5499 	pci_chipset_tag_t pc = sc->sc_pc;
   5500 	const char *intrstr = NULL;
   5501 	char intrbuf[PCI_INTRSTR_LEN];
   5502 	char intr_xname[INTRDEVNAMEBUF];
   5503 
   5504 	if (sc->sc_nqueues < ncpu) {
   5505 		/*
   5506 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5507 		 * interrupts starts from CPU#1.
   5508 		 */
   5509 		sc->sc_affinity_offset = 1;
   5510 	} else {
   5511 		/*
    5512 		 * In this case, this device uses all CPUs. So, we unify the
    5513 		 * affinitized cpu_index with the MSI-X vector number for readability.
   5514 		 */
   5515 		sc->sc_affinity_offset = 0;
   5516 	}
   5517 
   5518 	error = wm_alloc_txrx_queues(sc);
   5519 	if (error) {
   5520 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5521 		    error);
   5522 		return ENOMEM;
   5523 	}
   5524 
   5525 	kcpuset_create(&affinity, false);
   5526 	intr_idx = 0;
   5527 
   5528 	/*
   5529 	 * TX and RX
   5530 	 */
   5531 	txrx_established = 0;
   5532 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5533 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5534 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5535 
   5536 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5537 		    sizeof(intrbuf));
   5538 #ifdef WM_MPSAFE
   5539 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5540 		    PCI_INTR_MPSAFE, true);
   5541 #endif
   5542 		memset(intr_xname, 0, sizeof(intr_xname));
   5543 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5544 		    device_xname(sc->sc_dev), qidx);
   5545 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5546 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5547 		if (vih == NULL) {
   5548 			aprint_error_dev(sc->sc_dev,
   5549 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5550 			    intrstr ? " at " : "",
   5551 			    intrstr ? intrstr : "");
   5552 
   5553 			goto fail;
   5554 		}
   5555 		kcpuset_zero(affinity);
   5556 		/* Round-robin affinity */
   5557 		kcpuset_set(affinity, affinity_to);
   5558 		error = interrupt_distribute(vih, affinity, NULL);
   5559 		if (error == 0) {
   5560 			aprint_normal_dev(sc->sc_dev,
   5561 			    "for TX and RX interrupting at %s affinity to %u\n",
   5562 			    intrstr, affinity_to);
   5563 		} else {
   5564 			aprint_normal_dev(sc->sc_dev,
   5565 			    "for TX and RX interrupting at %s\n", intrstr);
   5566 		}
   5567 		sc->sc_ihs[intr_idx] = vih;
   5568 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5569 			goto fail;
   5570 		txrx_established++;
   5571 		intr_idx++;
   5572 	}
   5573 
   5574 	/* LINK */
   5575 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5576 	    sizeof(intrbuf));
   5577 #ifdef WM_MPSAFE
   5578 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5579 #endif
   5580 	memset(intr_xname, 0, sizeof(intr_xname));
   5581 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5582 	    device_xname(sc->sc_dev));
   5583 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5584 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5585 	if (vih == NULL) {
   5586 		aprint_error_dev(sc->sc_dev,
   5587 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5588 		    intrstr ? " at " : "",
   5589 		    intrstr ? intrstr : "");
   5590 
   5591 		goto fail;
   5592 	}
   5593 	/* Keep default affinity to LINK interrupt */
   5594 	aprint_normal_dev(sc->sc_dev,
   5595 	    "for LINK interrupting at %s\n", intrstr);
   5596 	sc->sc_ihs[intr_idx] = vih;
   5597 	sc->sc_link_intr_idx = intr_idx;
   5598 
   5599 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5600 	kcpuset_destroy(affinity);
   5601 	return 0;
   5602 
   5603  fail:
   5604 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5605 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5606 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5607 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5608 	}
   5609 
   5610 	kcpuset_destroy(affinity);
   5611 	return ENOMEM;
   5612 }
   5613 
   5614 static void
   5615 wm_unset_stopping_flags(struct wm_softc *sc)
   5616 {
   5617 	int i;
   5618 
   5619 	KASSERT(WM_CORE_LOCKED(sc));
   5620 
   5621 	/* Must unset stopping flags in ascending order. */
   5622 	for (i = 0; i < sc->sc_nqueues; i++) {
   5623 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5624 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5625 
   5626 		mutex_enter(txq->txq_lock);
   5627 		txq->txq_stopping = false;
   5628 		mutex_exit(txq->txq_lock);
   5629 
   5630 		mutex_enter(rxq->rxq_lock);
   5631 		rxq->rxq_stopping = false;
   5632 		mutex_exit(rxq->rxq_lock);
   5633 	}
   5634 
   5635 	sc->sc_core_stopping = false;
   5636 }
   5637 
   5638 static void
   5639 wm_set_stopping_flags(struct wm_softc *sc)
   5640 {
   5641 	int i;
   5642 
   5643 	KASSERT(WM_CORE_LOCKED(sc));
   5644 
   5645 	sc->sc_core_stopping = true;
   5646 
   5647 	/* Must set stopping flags in ascending order. */
   5648 	for (i = 0; i < sc->sc_nqueues; i++) {
   5649 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5650 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5651 
   5652 		mutex_enter(rxq->rxq_lock);
   5653 		rxq->rxq_stopping = true;
   5654 		mutex_exit(rxq->rxq_lock);
   5655 
   5656 		mutex_enter(txq->txq_lock);
   5657 		txq->txq_stopping = true;
   5658 		mutex_exit(txq->txq_lock);
   5659 	}
   5660 }
   5661 
   5662 /*
   5663  * Write interrupt interval value to ITR or EITR
   5664  */
   5665 static void
   5666 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5667 {
   5668 
   5669 	if (!wmq->wmq_set_itr)
   5670 		return;
   5671 
   5672 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5673 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5674 
   5675 		/*
    5676 		 * The 82575 doesn't have the CNT_INGR field,
    5677 		 * so overwrite the counter field in software.
   5678 		 */
   5679 		if (sc->sc_type == WM_T_82575)
   5680 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5681 		else
   5682 			eitr |= EITR_CNT_INGR;
   5683 
   5684 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5685 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5686 		/*
    5687 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5688 		 * the multiqueue function with MSI-X.
   5689 		 */
   5690 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5691 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5692 	} else {
   5693 		KASSERT(wmq->wmq_id == 0);
   5694 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5695 	}
   5696 
   5697 	wmq->wmq_set_itr = false;
   5698 }
   5699 
   5700 /*
   5701  * TODO
    5702  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5703  * but it does not fit wm(4). So, AIM is disabled until we find an
    5704  * appropriate ITR calculation.
   5705  */
   5706 /*
    5707  * Calculate the interrupt interval value to be written to the register
    5708  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5709  */
   5710 static void
   5711 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5712 {
   5713 #ifdef NOTYET
   5714 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5715 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5716 	uint32_t avg_size = 0;
   5717 	uint32_t new_itr;
   5718 
   5719 	if (rxq->rxq_packets)
   5720 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5721 	if (txq->txq_packets)
   5722 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5723 
   5724 	if (avg_size == 0) {
   5725 		new_itr = 450; /* restore default value */
   5726 		goto out;
   5727 	}
   5728 
   5729 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5730 	avg_size += 24;
   5731 
   5732 	/* Don't starve jumbo frames */
   5733 	avg_size = uimin(avg_size, 3000);
   5734 
   5735 	/* Give a little boost to mid-size frames */
   5736 	if ((avg_size > 300) && (avg_size < 1200))
   5737 		new_itr = avg_size / 3;
   5738 	else
   5739 		new_itr = avg_size / 2;
   5740 
   5741 out:
   5742 	/*
    5743 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
   5744 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5745 	 */
   5746 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5747 		new_itr *= 4;
   5748 
   5749 	if (new_itr != wmq->wmq_itr) {
   5750 		wmq->wmq_itr = new_itr;
   5751 		wmq->wmq_set_itr = true;
   5752 	} else
   5753 		wmq->wmq_set_itr = false;
   5754 
   5755 	rxq->rxq_packets = 0;
   5756 	rxq->rxq_bytes = 0;
   5757 	txq->txq_packets = 0;
   5758 	txq->txq_bytes = 0;
   5759 #endif
   5760 }
   5761 
   5762 static void
   5763 wm_init_sysctls(struct wm_softc *sc)
   5764 {
   5765 	struct sysctllog **log;
   5766 	const struct sysctlnode *rnode, *cnode;
   5767 	int rv;
   5768 	const char *dvname;
   5769 
   5770 	log = &sc->sc_sysctllog;
   5771 	dvname = device_xname(sc->sc_dev);
   5772 
   5773 	rv = sysctl_createv(log, 0, NULL, &rnode,
   5774 	    0, CTLTYPE_NODE, dvname,
   5775 	    SYSCTL_DESCR("wm information and settings"),
   5776 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
   5777 	if (rv != 0)
   5778 		goto err;
   5779 
   5780 	rv = sysctl_createv(log, 0, &rnode, &cnode, CTLFLAG_READWRITE,
    5781 	    CTLTYPE_BOOL, "txrx_workqueue",
	    SYSCTL_DESCR("Use workqueue for packet processing"),
   5782 	    NULL, 0, &sc->sc_txrx_use_workqueue, 0, CTL_CREATE, CTL_EOL);
   5783 	if (rv != 0)
   5784 		goto teardown;
   5785 
   5786 	return;
   5787 
   5788 teardown:
   5789 	sysctl_teardown(log);
   5790 err:
   5791 	sc->sc_sysctllog = NULL;
   5792 	device_printf(sc->sc_dev, "%s: sysctl_createv failed, rv = %d\n",
   5793 	    __func__, rv);
   5794 }
   5795 
   5796 /*
   5797  * wm_init:		[ifnet interface function]
   5798  *
   5799  *	Initialize the interface.
   5800  */
   5801 static int
   5802 wm_init(struct ifnet *ifp)
   5803 {
   5804 	struct wm_softc *sc = ifp->if_softc;
   5805 	int ret;
   5806 
   5807 	WM_CORE_LOCK(sc);
   5808 	ret = wm_init_locked(ifp);
   5809 	WM_CORE_UNLOCK(sc);
   5810 
   5811 	return ret;
   5812 }
   5813 
   5814 static int
   5815 wm_init_locked(struct ifnet *ifp)
   5816 {
   5817 	struct wm_softc *sc = ifp->if_softc;
   5818 	struct ethercom *ec = &sc->sc_ethercom;
   5819 	int i, j, trynum, error = 0;
   5820 	uint32_t reg, sfp_mask = 0;
   5821 
   5822 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5823 		device_xname(sc->sc_dev), __func__));
   5824 	KASSERT(WM_CORE_LOCKED(sc));
   5825 
   5826 	/*
    5827 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5828 	 * There is a small but measurable benefit to avoiding the adjustment
    5829 	 * of the descriptor so that the headers are aligned, for normal MTU,
   5830 	 * on such platforms.  One possibility is that the DMA itself is
   5831 	 * slightly more efficient if the front of the entire packet (instead
   5832 	 * of the front of the headers) is aligned.
   5833 	 *
   5834 	 * Note we must always set align_tweak to 0 if we are using
   5835 	 * jumbo frames.
   5836 	 */
   5837 #ifdef __NO_STRICT_ALIGNMENT
   5838 	sc->sc_align_tweak = 0;
   5839 #else
   5840 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5841 		sc->sc_align_tweak = 0;
   5842 	else
   5843 		sc->sc_align_tweak = 2;
   5844 #endif /* __NO_STRICT_ALIGNMENT */
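         	/*
         	 * With align_tweak = 2, the 14-byte Ethernet header is offset
         	 * by two bytes so that the IP header lands on a 4-byte boundary.
         	 */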
   5845 
   5846 	/* Cancel any pending I/O. */
   5847 	wm_stop_locked(ifp, 0);
   5848 
   5849 	/* Update statistics before reset */
   5850 	if_statadd2(ifp, if_collisions, CSR_READ(sc, WMREG_COLC),
   5851 	    if_ierrors, CSR_READ(sc, WMREG_RXERRC));
   5852 
   5853 	/* PCH_SPT hardware workaround */
   5854 	if (sc->sc_type == WM_T_PCH_SPT)
   5855 		wm_flush_desc_rings(sc);
   5856 
   5857 	/* Reset the chip to a known state. */
   5858 	wm_reset(sc);
   5859 
   5860 	/*
    5861 	 * AMT-based hardware can now take control from the firmware.
    5862 	 * Do this after reset.
   5863 	 */
   5864 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5865 		wm_get_hw_control(sc);
   5866 
   5867 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5868 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5869 		wm_legacy_irq_quirk_spt(sc);
   5870 
   5871 	/* Init hardware bits */
   5872 	wm_initialize_hardware_bits(sc);
   5873 
   5874 	/* Reset the PHY. */
   5875 	if (sc->sc_flags & WM_F_HAS_MII)
   5876 		wm_gmii_reset(sc);
   5877 
   5878 	if (sc->sc_type >= WM_T_ICH8) {
   5879 		reg = CSR_READ(sc, WMREG_GCR);
   5880 		/*
   5881 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5882 		 * default after reset.
   5883 		 */
   5884 		if (sc->sc_type == WM_T_ICH8)
   5885 			reg |= GCR_NO_SNOOP_ALL;
   5886 		else
   5887 			reg &= ~GCR_NO_SNOOP_ALL;
   5888 		CSR_WRITE(sc, WMREG_GCR, reg);
   5889 	}
   5890 	if ((sc->sc_type >= WM_T_ICH8)
   5891 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5892 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5893 
   5894 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5895 		reg |= CTRL_EXT_RO_DIS;
   5896 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5897 	}
   5898 
   5899 	/* Calculate (E)ITR value */
   5900 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5901 		/*
   5902 		 * For NEWQUEUE's EITR (except for 82575).
    5903 		 * The 82575's EITR should be set to the same throttling
    5904 		 * value as the other old controllers' ITR because the
    5905 		 * interrupt/sec calculation is the same: 1,000,000,000 / (N * 256).
    5906 		 *
    5907 		 * The 82574's EITR should be set to the same throttling value as the ITR.
    5908 		 *
    5909 		 * For N interrupts/sec, set this value to 1,000,000 / N,
    5910 		 * in contrast to the ITR throttling value.
   5911 		 */
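         		/* 450 gives roughly 1,000,000 / 450 ~= 2222 interrupts/sec. */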
   5912 		sc->sc_itr_init = 450;
   5913 	} else if (sc->sc_type >= WM_T_82543) {
   5914 		/*
   5915 		 * Set up the interrupt throttling register (units of 256ns)
   5916 		 * Note that a footnote in Intel's documentation says this
   5917 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5918 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5919 		 * that that is also true for the 1024ns units of the other
   5920 		 * interrupt-related timer registers -- so, really, we ought
   5921 		 * to divide this value by 4 when the link speed is low.
   5922 		 *
   5923 		 * XXX implement this division at link speed change!
   5924 		 */
   5925 
   5926 		/*
   5927 		 * For N interrupts/sec, set this value to:
   5928 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5929 		 * absolute and packet timer values to this value
   5930 		 * divided by 4 to get "simple timer" behavior.
   5931 		 */
   5932 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
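         		/* 1,000,000,000 / (2604 * 256) ~= 1500, matching the formula above. */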
   5933 	}
   5934 
   5935 	error = wm_init_txrx_queues(sc);
   5936 	if (error)
   5937 		goto out;
   5938 
   5939 	if (((sc->sc_flags & WM_F_SGMII) == 0) &&
   5940 	    (sc->sc_mediatype == WM_MEDIATYPE_SERDES) &&
   5941 	    (sc->sc_type >= WM_T_82575))
   5942 		wm_serdes_power_up_link_82575(sc);
   5943 
   5944 	/* Clear out the VLAN table -- we don't use it (yet). */
   5945 	CSR_WRITE(sc, WMREG_VET, 0);
   5946 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5947 		trynum = 10; /* Due to hw errata */
   5948 	else
   5949 		trynum = 1;
   5950 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5951 		for (j = 0; j < trynum; j++)
   5952 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5953 
   5954 	/*
   5955 	 * Set up flow-control parameters.
   5956 	 *
   5957 	 * XXX Values could probably stand some tuning.
   5958 	 */
   5959 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5960 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5961 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5962 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5963 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5964 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5965 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5966 	}
   5967 
   5968 	sc->sc_fcrtl = FCRTL_DFLT;
   5969 	if (sc->sc_type < WM_T_82543) {
   5970 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5971 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5972 	} else {
   5973 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5974 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5975 	}
   5976 
   5977 	if (sc->sc_type == WM_T_80003)
   5978 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5979 	else
   5980 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5981 
   5982 	/* Writes the control register. */
   5983 	wm_set_vlan(sc);
   5984 
   5985 	if (sc->sc_flags & WM_F_HAS_MII) {
   5986 		uint16_t kmreg;
   5987 
   5988 		switch (sc->sc_type) {
   5989 		case WM_T_80003:
   5990 		case WM_T_ICH8:
   5991 		case WM_T_ICH9:
   5992 		case WM_T_ICH10:
   5993 		case WM_T_PCH:
   5994 		case WM_T_PCH2:
   5995 		case WM_T_PCH_LPT:
   5996 		case WM_T_PCH_SPT:
   5997 		case WM_T_PCH_CNP:
   5998 			/*
    5999 			 * Set the MAC to wait the maximum time between each
    6000 			 * iteration and increase the max iterations when
    6001 			 * polling the PHY; this fixes erroneous timeouts at
   6002 			 * 10Mbps.
   6003 			 */
   6004 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   6005 			    0xFFFF);
   6006 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6007 			    &kmreg);
   6008 			kmreg |= 0x3F;
   6009 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   6010 			    kmreg);
   6011 			break;
   6012 		default:
   6013 			break;
   6014 		}
   6015 
   6016 		if (sc->sc_type == WM_T_80003) {
   6017 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6018 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   6019 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6020 
    6021 			/* Bypass the RX and TX FIFOs */
   6022 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   6023 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   6024 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   6025 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   6026 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   6027 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   6028 		}
   6029 	}
   6030 #if 0
   6031 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   6032 #endif
   6033 
   6034 	/* Set up checksum offload parameters. */
   6035 	reg = CSR_READ(sc, WMREG_RXCSUM);
   6036 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   6037 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   6038 		reg |= RXCSUM_IPOFL;
   6039 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   6040 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   6041 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   6042 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   6043 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6044 
   6045 	/* Set registers about MSI-X */
   6046 	if (wm_is_using_msix(sc)) {
   6047 		uint32_t ivar, qintr_idx;
   6048 		struct wm_queue *wmq;
   6049 		unsigned int qid;
   6050 
   6051 		if (sc->sc_type == WM_T_82575) {
   6052 			/* Interrupt control */
   6053 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6054 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   6055 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6056 
   6057 			/* TX and RX */
   6058 			for (i = 0; i < sc->sc_nqueues; i++) {
   6059 				wmq = &sc->sc_queue[i];
   6060 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   6061 				    EITR_TX_QUEUE(wmq->wmq_id)
   6062 				    | EITR_RX_QUEUE(wmq->wmq_id));
   6063 			}
   6064 			/* Link status */
   6065 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   6066 			    EITR_OTHER);
   6067 		} else if (sc->sc_type == WM_T_82574) {
   6068 			/* Interrupt control */
   6069 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   6070 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   6071 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   6072 
   6073 			/*
    6074 			 * Work around an issue with spurious interrupts
    6075 			 * in MSI-X mode.
    6076 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    6077 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   6078 			 */
   6079 			reg = CSR_READ(sc, WMREG_RFCTL);
   6080 			reg |= WMREG_RFCTL_ACKDIS;
   6081 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   6082 
   6083 			ivar = 0;
   6084 			/* TX and RX */
   6085 			for (i = 0; i < sc->sc_nqueues; i++) {
   6086 				wmq = &sc->sc_queue[i];
   6087 				qid = wmq->wmq_id;
   6088 				qintr_idx = wmq->wmq_intr_idx;
   6089 
   6090 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6091 				    IVAR_TX_MASK_Q_82574(qid));
   6092 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   6093 				    IVAR_RX_MASK_Q_82574(qid));
   6094 			}
   6095 			/* Link status */
   6096 			ivar |= __SHIFTIN((IVAR_VALID_82574
   6097 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   6098 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   6099 		} else {
   6100 			/* Interrupt control */
   6101 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   6102 			    | GPIE_EIAME | GPIE_PBA);
   6103 
   6104 			switch (sc->sc_type) {
   6105 			case WM_T_82580:
   6106 			case WM_T_I350:
   6107 			case WM_T_I354:
   6108 			case WM_T_I210:
   6109 			case WM_T_I211:
   6110 				/* TX and RX */
   6111 				for (i = 0; i < sc->sc_nqueues; i++) {
   6112 					wmq = &sc->sc_queue[i];
   6113 					qid = wmq->wmq_id;
   6114 					qintr_idx = wmq->wmq_intr_idx;
   6115 
   6116 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   6117 					ivar &= ~IVAR_TX_MASK_Q(qid);
   6118 					ivar |= __SHIFTIN((qintr_idx
   6119 						| IVAR_VALID),
   6120 					    IVAR_TX_MASK_Q(qid));
   6121 					ivar &= ~IVAR_RX_MASK_Q(qid);
   6122 					ivar |= __SHIFTIN((qintr_idx
   6123 						| IVAR_VALID),
   6124 					    IVAR_RX_MASK_Q(qid));
   6125 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   6126 				}
   6127 				break;
   6128 			case WM_T_82576:
   6129 				/* TX and RX */
   6130 				for (i = 0; i < sc->sc_nqueues; i++) {
   6131 					wmq = &sc->sc_queue[i];
   6132 					qid = wmq->wmq_id;
   6133 					qintr_idx = wmq->wmq_intr_idx;
   6134 
   6135 					ivar = CSR_READ(sc,
   6136 					    WMREG_IVAR_Q_82576(qid));
   6137 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   6138 					ivar |= __SHIFTIN((qintr_idx
   6139 						| IVAR_VALID),
   6140 					    IVAR_TX_MASK_Q_82576(qid));
   6141 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   6142 					ivar |= __SHIFTIN((qintr_idx
   6143 						| IVAR_VALID),
   6144 					    IVAR_RX_MASK_Q_82576(qid));
   6145 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   6146 					    ivar);
   6147 				}
   6148 				break;
   6149 			default:
   6150 				break;
   6151 			}
   6152 
   6153 			/* Link status */
   6154 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6155 			    IVAR_MISC_OTHER);
   6156 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6157 		}
   6158 
   6159 		if (wm_is_using_multiqueue(sc)) {
   6160 			wm_init_rss(sc);
   6161 
    6162 			/*
    6163 			 * NOTE: Receive Full-Packet Checksum Offload
    6164 			 * is mutually exclusive with Multiqueue. However,
    6165 			 * this is not the same as the TCP/IP checksums,
    6166 			 * which still work.
    6167 			 */
   6168 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6169 			reg |= RXCSUM_PCSD;
   6170 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6171 		}
   6172 	}
   6173 
   6174 	/* Set up the interrupt registers: first, disable all interrupts. */
   6175 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6176 
   6177 	/* Enable SFP module insertion interrupt if it's required */
   6178 	if ((sc->sc_flags & WM_F_SFP) != 0) {
   6179 		sc->sc_ctrl |= CTRL_EXTLINK_EN;
   6180 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6181 		sfp_mask = ICR_GPI(0);
   6182 	}
   6183 
   6184 	if (wm_is_using_msix(sc)) {
   6185 		uint32_t mask;
   6186 		struct wm_queue *wmq;
   6187 
   6188 		switch (sc->sc_type) {
   6189 		case WM_T_82574:
   6190 			mask = 0;
   6191 			for (i = 0; i < sc->sc_nqueues; i++) {
   6192 				wmq = &sc->sc_queue[i];
   6193 				mask |= ICR_TXQ(wmq->wmq_id);
   6194 				mask |= ICR_RXQ(wmq->wmq_id);
   6195 			}
   6196 			mask |= ICR_OTHER;
   6197 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6198 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6199 			break;
   6200 		default:
   6201 			if (sc->sc_type == WM_T_82575) {
   6202 				mask = 0;
   6203 				for (i = 0; i < sc->sc_nqueues; i++) {
   6204 					wmq = &sc->sc_queue[i];
   6205 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6206 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6207 				}
   6208 				mask |= EITR_OTHER;
   6209 			} else {
   6210 				mask = 0;
   6211 				for (i = 0; i < sc->sc_nqueues; i++) {
   6212 					wmq = &sc->sc_queue[i];
   6213 					mask |= 1 << wmq->wmq_intr_idx;
   6214 				}
   6215 				mask |= 1 << sc->sc_link_intr_idx;
   6216 			}
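        			/*
        			 * Enable the queue and link vectors.  EIAC
        			 * selects the causes that are auto-cleared, EIAM
        			 * the causes that are auto-masked, and EIMS
        			 * unmasks them.
        			 */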
   6217 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6218 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6219 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6220 
   6221 			/* For other interrupts */
   6222 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC | sfp_mask);
   6223 			break;
   6224 		}
   6225 	} else {
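        		/*
        		 * Legacy/MSI: enable Tx descriptor written back, link
        		 * status change, Rx sequence error, Rx descriptor
        		 * minimum threshold, Rx overrun and Rx timer interrupts
        		 * (plus the SFP GPI when present).
        		 */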
   6226 		sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6227 		    ICR_RXO | ICR_RXT0 | sfp_mask;
   6228 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6229 	}
   6230 
   6231 	/* Set up the inter-packet gap. */
   6232 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6233 
   6234 	if (sc->sc_type >= WM_T_82543) {
   6235 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6236 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6237 			wm_itrs_writereg(sc, wmq);
   6238 		}
   6239 		/*
   6240 		 * Link interrupts occur much less frequently than TX and
   6241 		 * RX interrupts, so we don't tune the
   6242 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
   6243 		 * if_igb does.
   6244 		 */
   6245 	}
   6246 
   6247 	/* Set the VLAN ethernetype. */
   6248 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6249 
   6250 	/*
   6251 	 * Set up the transmit control register; we start out with
   6252 	 * a collision distance suitable for FDX, but update it when
   6253 	 * we resolve the media type.
   6254 	 */
   6255 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6256 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6257 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6258 	if (sc->sc_type >= WM_T_82571)
   6259 		sc->sc_tctl |= TCTL_MULR;
   6260 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6261 
   6262 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6263 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6264 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6265 	}
   6266 
   6267 	if (sc->sc_type == WM_T_80003) {
   6268 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6269 		reg &= ~TCTL_EXT_GCEX_MASK;
   6270 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6271 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6272 	}
   6273 
   6274 	/* Set the media. */
   6275 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6276 		goto out;
   6277 
   6278 	/* Configure for OS presence */
   6279 	wm_init_manageability(sc);
   6280 
   6281 	/*
   6282 	 * Set up the receive control register; we actually program the
   6283 	 * register when we set the receive filter. Use multicast address
   6284 	 * offset type 0.
   6285 	 *
   6286 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6287 	 * don't enable that feature.
   6288 	 */
   6289 	sc->sc_mchash_type = 0;
   6290 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6291 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6292 
   6293 	/* The 82574 uses the one-buffer extended Rx descriptor. */
   6294 	if (sc->sc_type == WM_T_82574)
   6295 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6296 
   6297 	/*
   6298 	 * The I350 has a bug where it always strips the CRC whether
   6299 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   6300 	 */
   6301 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6302 	    || (sc->sc_type == WM_T_I210))
   6303 		sc->sc_rctl |= RCTL_SECRC;
   6304 
   6305 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6306 	    && (ifp->if_mtu > ETHERMTU)) {
   6307 		sc->sc_rctl |= RCTL_LPE;
   6308 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6309 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6310 	}
   6311 
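        	/*
        	 * Select the Rx buffer size matching MCLBYTES.  RCTL_BSEX
        	 * (buffer size extension) selects the extended 4k/8k/16k
        	 * encodings.
        	 */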
   6312 	if (MCLBYTES == 2048)
   6313 		sc->sc_rctl |= RCTL_2k;
   6314 	else {
   6315 		if (sc->sc_type >= WM_T_82543) {
   6316 			switch (MCLBYTES) {
   6317 			case 4096:
   6318 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6319 				break;
   6320 			case 8192:
   6321 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6322 				break;
   6323 			case 16384:
   6324 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6325 				break;
   6326 			default:
   6327 				panic("wm_init: MCLBYTES %d unsupported",
   6328 				    MCLBYTES);
   6329 				break;
   6330 			}
   6331 		} else
   6332 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6333 	}
   6334 
   6335 	/* Enable ECC */
   6336 	switch (sc->sc_type) {
   6337 	case WM_T_82571:
   6338 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6339 		reg |= PBA_ECC_CORR_EN;
   6340 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6341 		break;
   6342 	case WM_T_PCH_LPT:
   6343 	case WM_T_PCH_SPT:
   6344 	case WM_T_PCH_CNP:
   6345 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6346 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6347 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6348 
   6349 		sc->sc_ctrl |= CTRL_MEHE;
   6350 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6351 		break;
   6352 	default:
   6353 		break;
   6354 	}
   6355 
   6356 	/*
   6357 	 * Set the receive filter.
   6358 	 *
   6359 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6360 	 * the setting of RCTL.EN in wm_set_filter()
   6361 	 */
   6362 	wm_set_filter(sc);
   6363 
   6364 	/* On 82575 and later, set RDT only after RX is enabled */
   6365 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6366 		int qidx;
   6367 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6368 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6369 			for (i = 0; i < WM_NRXDESC; i++) {
   6370 				mutex_enter(rxq->rxq_lock);
   6371 				wm_init_rxdesc(rxq, i);
   6372 				mutex_exit(rxq->rxq_lock);
   6374 			}
   6375 		}
   6376 	}
   6377 
   6378 	wm_unset_stopping_flags(sc);
   6379 
   6380 	/* Start the one second link check clock. */
   6381 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6382 
   6383 	/* ...all done! */
   6384 	ifp->if_flags |= IFF_RUNNING;
   6385 	ifp->if_flags &= ~IFF_OACTIVE;
   6386 
   6387  out:
   6388 	/* Save last flags for the callback */
   6389 	sc->sc_if_flags = ifp->if_flags;
   6390 	sc->sc_ec_capenable = ec->ec_capenable;
   6391 	if (error)
   6392 		log(LOG_ERR, "%s: interface not running\n",
   6393 		    device_xname(sc->sc_dev));
   6394 	return error;
   6395 }
   6396 
   6397 /*
   6398  * wm_stop:		[ifnet interface function]
   6399  *
   6400  *	Stop transmission on the interface.
   6401  */
   6402 static void
   6403 wm_stop(struct ifnet *ifp, int disable)
   6404 {
   6405 	struct wm_softc *sc = ifp->if_softc;
   6406 
   6407 	WM_CORE_LOCK(sc);
   6408 	wm_stop_locked(ifp, disable);
   6409 	WM_CORE_UNLOCK(sc);
   6410 
   6411 	/*
   6412 	 * After wm_set_stopping_flags(), it is guaranteed that
   6413 	 * wm_handle_queue_work() does not call workqueue_enqueue().
   6414 	 * However, workqueue_wait() cannot be called in
   6415 	 * wm_stop_locked() because it can sleep, so call
   6416 	 * workqueue_wait() here.
   6417 	 */
   6418 	for (int i = 0; i < sc->sc_nqueues; i++)
   6419 		workqueue_wait(sc->sc_queue_wq, &sc->sc_queue[i].wmq_cookie);
   6420 }
   6421 
   6422 static void
   6423 wm_stop_locked(struct ifnet *ifp, int disable)
   6424 {
   6425 	struct wm_softc *sc = ifp->if_softc;
   6426 	struct wm_txsoft *txs;
   6427 	int i, qidx;
   6428 
   6429 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6430 		device_xname(sc->sc_dev), __func__));
   6431 	KASSERT(WM_CORE_LOCKED(sc));
   6432 
   6433 	wm_set_stopping_flags(sc);
   6434 
   6435 	/* Stop the one second clock. */
   6436 	callout_stop(&sc->sc_tick_ch);
   6437 
   6438 	/* Stop the 82547 Tx FIFO stall check timer. */
   6439 	if (sc->sc_type == WM_T_82547)
   6440 		callout_stop(&sc->sc_txfifo_ch);
   6441 
   6442 	if (sc->sc_flags & WM_F_HAS_MII) {
   6443 		/* Down the MII. */
   6444 		mii_down(&sc->sc_mii);
   6445 	} else {
   6446 #if 0
   6447 		/* Should we clear PHY's status properly? */
   6448 		wm_reset(sc);
   6449 #endif
   6450 	}
   6451 
   6452 	/* Stop the transmit and receive processes. */
   6453 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6454 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6455 	sc->sc_rctl &= ~RCTL_EN;
   6456 
   6457 	/*
   6458 	 * Clear the interrupt mask to ensure the device cannot assert its
   6459 	 * interrupt line.
   6460 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6461 	 * service any currently pending or shared interrupt.
   6462 	 */
   6463 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6464 	sc->sc_icr = 0;
   6465 	if (wm_is_using_msix(sc)) {
   6466 		if (sc->sc_type != WM_T_82574) {
   6467 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6468 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6469 		} else
   6470 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6471 	}
   6472 
   6473 	/* Release any queued transmit buffers. */
   6474 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6475 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6476 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6477 		mutex_enter(txq->txq_lock);
   6478 		txq->txq_sending = false; /* Ensure watchdog disabled */
   6479 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6480 			txs = &txq->txq_soft[i];
   6481 			if (txs->txs_mbuf != NULL) {
   6482 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6483 				m_freem(txs->txs_mbuf);
   6484 				txs->txs_mbuf = NULL;
   6485 			}
   6486 		}
   6487 		mutex_exit(txq->txq_lock);
   6488 	}
   6489 
   6490 	/* Mark the interface as down and cancel the watchdog timer. */
   6491 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6492 
   6493 	if (disable) {
   6494 		for (i = 0; i < sc->sc_nqueues; i++) {
   6495 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6496 			mutex_enter(rxq->rxq_lock);
   6497 			wm_rxdrain(rxq);
   6498 			mutex_exit(rxq->rxq_lock);
   6499 		}
   6500 	}
   6501 
   6502 #if 0 /* notyet */
   6503 	if (sc->sc_type >= WM_T_82544)
   6504 		CSR_WRITE(sc, WMREG_WUC, 0);
   6505 #endif
   6506 }
   6507 
   6508 static void
   6509 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6510 {
   6511 	struct mbuf *m;
   6512 	int i;
   6513 
   6514 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6515 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6516 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6517 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6518 		    m->m_data, m->m_len, m->m_flags);
   6519 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6520 	    i, i == 1 ? "" : "s");
   6521 }
   6522 
   6523 /*
   6524  * wm_82547_txfifo_stall:
   6525  *
   6526  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6527  *	reset the FIFO pointers, and restart packet transmission.
   6528  */
   6529 static void
   6530 wm_82547_txfifo_stall(void *arg)
   6531 {
   6532 	struct wm_softc *sc = arg;
   6533 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6534 
   6535 	mutex_enter(txq->txq_lock);
   6536 
   6537 	if (txq->txq_stopping)
   6538 		goto out;
   6539 
   6540 	if (txq->txq_fifo_stall) {
   6541 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6542 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6543 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6544 			/*
   6545 			 * Packets have drained.  Stop transmitter, reset
   6546 			 * FIFO pointers, restart transmitter, and kick
   6547 			 * the packet queue.
   6548 			 */
   6549 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6550 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6551 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6552 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6553 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6554 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6555 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6556 			CSR_WRITE_FLUSH(sc);
   6557 
   6558 			txq->txq_fifo_head = 0;
   6559 			txq->txq_fifo_stall = 0;
   6560 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6561 		} else {
   6562 			/*
   6563 			 * Still waiting for packets to drain; try again in
   6564 			 * another tick.
   6565 			 */
   6566 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6567 		}
   6568 	}
   6569 
   6570 out:
   6571 	mutex_exit(txq->txq_lock);
   6572 }
   6573 
   6574 /*
   6575  * wm_82547_txfifo_bugchk:
   6576  *
   6577  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6578  *	prevent enqueueing a packet that would wrap around the end
   6579  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6580  *
   6581  *	We do this by checking the amount of space before the end
   6582  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6583  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6584  *	the internal FIFO pointers to the beginning, and restart
   6585  *	transmission on the interface.
   6586  */
   6587 #define	WM_FIFO_HDR		0x10
   6588 #define	WM_82547_PAD_LEN	0x3e0
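        /*
         * Each packet occupies its rounded-up length plus a 16-byte FIFO
         * header (WM_FIFO_HDR) in the Tx FIFO; WM_82547_PAD_LEN is the
         * extra margin allowed for in the stall check below.
         */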
   6589 static int
   6590 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6591 {
   6592 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6593 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6594 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
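        	/*
        	 * Example: a 1514-byte frame occupies
        	 * roundup(1514 + 16, 16) = 1536 bytes of FIFO space.
        	 */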
   6595 
   6596 	/* Just return if already stalled. */
   6597 	if (txq->txq_fifo_stall)
   6598 		return 1;
   6599 
   6600 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6601 		/* Stall only occurs in half-duplex mode. */
   6602 		goto send_packet;
   6603 	}
   6604 
   6605 	if (len >= WM_82547_PAD_LEN + space) {
   6606 		txq->txq_fifo_stall = 1;
   6607 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6608 		return 1;
   6609 	}
   6610 
   6611  send_packet:
   6612 	txq->txq_fifo_head += len;
   6613 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6614 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6615 
   6616 	return 0;
   6617 }
   6618 
   6619 static int
   6620 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6621 {
   6622 	int error;
   6623 
   6624 	/*
   6625 	 * Allocate the control data structures, and create and load the
   6626 	 * DMA map for it.
   6627 	 *
   6628 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6629 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6630 	 * both sets within the same 4G segment.
   6631 	 */
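        	/*
        	 * The 4G constraint is enforced by passing 0x100000000 as the
        	 * boundary argument to bus_dmamem_alloc() below.
        	 */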
   6632 	if (sc->sc_type < WM_T_82544)
   6633 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6634 	else
   6635 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6636 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6637 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6638 	else
   6639 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6640 
   6641 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6642 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6643 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6644 		aprint_error_dev(sc->sc_dev,
   6645 		    "unable to allocate TX control data, error = %d\n",
   6646 		    error);
   6647 		goto fail_0;
   6648 	}
   6649 
   6650 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6651 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6652 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6653 		aprint_error_dev(sc->sc_dev,
   6654 		    "unable to map TX control data, error = %d\n", error);
   6655 		goto fail_1;
   6656 	}
   6657 
   6658 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6659 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6660 		aprint_error_dev(sc->sc_dev,
   6661 		    "unable to create TX control data DMA map, error = %d\n",
   6662 		    error);
   6663 		goto fail_2;
   6664 	}
   6665 
   6666 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6667 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6668 		aprint_error_dev(sc->sc_dev,
   6669 		    "unable to load TX control data DMA map, error = %d\n",
   6670 		    error);
   6671 		goto fail_3;
   6672 	}
   6673 
   6674 	return 0;
   6675 
   6676  fail_3:
   6677 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6678  fail_2:
   6679 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6680 	    WM_TXDESCS_SIZE(txq));
   6681  fail_1:
   6682 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6683  fail_0:
   6684 	return error;
   6685 }
   6686 
   6687 static void
   6688 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6689 {
   6690 
   6691 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6692 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6693 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6694 	    WM_TXDESCS_SIZE(txq));
   6695 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6696 }
   6697 
   6698 static int
   6699 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6700 {
   6701 	int error;
   6702 	size_t rxq_descs_size;
   6703 
   6704 	/*
   6705 	 * Allocate the control data structures, and create and load the
   6706 	 * DMA map for it.
   6707 	 *
   6708 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6709 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6710 	 * both sets within the same 4G segment.
   6711 	 */
   6712 	rxq->rxq_ndesc = WM_NRXDESC;
   6713 	if (sc->sc_type == WM_T_82574)
   6714 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6715 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6716 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6717 	else
   6718 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6719 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6720 
   6721 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6722 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6723 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6724 		aprint_error_dev(sc->sc_dev,
   6725 		    "unable to allocate RX control data, error = %d\n",
   6726 		    error);
   6727 		goto fail_0;
   6728 	}
   6729 
   6730 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6731 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6732 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6733 		aprint_error_dev(sc->sc_dev,
   6734 		    "unable to map RX control data, error = %d\n", error);
   6735 		goto fail_1;
   6736 	}
   6737 
   6738 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6739 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6740 		aprint_error_dev(sc->sc_dev,
   6741 		    "unable to create RX control data DMA map, error = %d\n",
   6742 		    error);
   6743 		goto fail_2;
   6744 	}
   6745 
   6746 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6747 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6748 		aprint_error_dev(sc->sc_dev,
   6749 		    "unable to load RX control data DMA map, error = %d\n",
   6750 		    error);
   6751 		goto fail_3;
   6752 	}
   6753 
   6754 	return 0;
   6755 
   6756  fail_3:
   6757 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6758  fail_2:
   6759 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6760 	    rxq_descs_size);
   6761  fail_1:
   6762 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6763  fail_0:
   6764 	return error;
   6765 }
   6766 
   6767 static void
   6768 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6769 {
   6770 
   6771 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6772 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6773 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6774 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6775 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6776 }
   6777 
   6778 
   6779 static int
   6780 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6781 {
   6782 	int i, error;
   6783 
   6784 	/* Create the transmit buffer DMA maps. */
   6785 	WM_TXQUEUELEN(txq) =
   6786 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6787 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6788 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6789 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6790 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6791 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6792 			aprint_error_dev(sc->sc_dev,
   6793 			    "unable to create Tx DMA map %d, error = %d\n",
   6794 			    i, error);
   6795 			goto fail;
   6796 		}
   6797 	}
   6798 
   6799 	return 0;
   6800 
   6801  fail:
   6802 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6803 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6804 			bus_dmamap_destroy(sc->sc_dmat,
   6805 			    txq->txq_soft[i].txs_dmamap);
   6806 	}
   6807 	return error;
   6808 }
   6809 
   6810 static void
   6811 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6812 {
   6813 	int i;
   6814 
   6815 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6816 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6817 			bus_dmamap_destroy(sc->sc_dmat,
   6818 			    txq->txq_soft[i].txs_dmamap);
   6819 	}
   6820 }
   6821 
   6822 static int
   6823 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6824 {
   6825 	int i, error;
   6826 
   6827 	/* Create the receive buffer DMA maps. */
   6828 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6829 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6830 			    MCLBYTES, 0, 0,
   6831 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6832 			aprint_error_dev(sc->sc_dev,
   6833 			    "unable to create Rx DMA map %d, error = %d\n",
   6834 			    i, error);
   6835 			goto fail;
   6836 		}
   6837 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6838 	}
   6839 
   6840 	return 0;
   6841 
   6842  fail:
   6843 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6844 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6845 			bus_dmamap_destroy(sc->sc_dmat,
   6846 			    rxq->rxq_soft[i].rxs_dmamap);
   6847 	}
   6848 	return error;
   6849 }
   6850 
   6851 static void
   6852 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6853 {
   6854 	int i;
   6855 
   6856 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6857 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6858 			bus_dmamap_destroy(sc->sc_dmat,
   6859 			    rxq->rxq_soft[i].rxs_dmamap);
   6860 	}
   6861 }
   6862 
   6863 /*
   6864  * wm_alloc_txrx_queues:
   6865  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6866  */
   6867 static int
   6868 wm_alloc_txrx_queues(struct wm_softc *sc)
   6869 {
   6870 	int i, error, tx_done, rx_done;
   6871 
   6872 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6873 	    KM_SLEEP);
   6874 	if (sc->sc_queue == NULL) {
   6875 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6876 		error = ENOMEM;
   6877 		goto fail_0;
   6878 	}
   6879 
   6880 	/* For transmission */
   6881 	error = 0;
   6882 	tx_done = 0;
   6883 	for (i = 0; i < sc->sc_nqueues; i++) {
   6884 #ifdef WM_EVENT_COUNTERS
   6885 		int j;
   6886 		const char *xname;
   6887 #endif
   6888 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6889 		txq->txq_sc = sc;
   6890 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6891 
   6892 		error = wm_alloc_tx_descs(sc, txq);
   6893 		if (error)
   6894 			break;
   6895 		error = wm_alloc_tx_buffer(sc, txq);
   6896 		if (error) {
   6897 			wm_free_tx_descs(sc, txq);
   6898 			break;
   6899 		}
   6900 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6901 		if (txq->txq_interq == NULL) {
   6902 			wm_free_tx_descs(sc, txq);
   6903 			wm_free_tx_buffer(sc, txq);
   6904 			error = ENOMEM;
   6905 			break;
   6906 		}
   6907 
   6908 #ifdef WM_EVENT_COUNTERS
   6909 		xname = device_xname(sc->sc_dev);
   6910 
   6911 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6912 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6913 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6914 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6915 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6916 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6917 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6918 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6919 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6920 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6921 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6922 
   6923 		for (j = 0; j < WM_NTXSEGS; j++) {
   6924 			snprintf(txq->txq_txseg_evcnt_names[j],
   6925 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6926 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6927 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6928 		}
   6929 
   6930 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6931 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6932 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6933 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6934 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6935 #endif /* WM_EVENT_COUNTERS */
   6936 
   6937 		tx_done++;
   6938 	}
   6939 	if (error)
   6940 		goto fail_1;
   6941 
   6942 	/* For receive */
   6943 	error = 0;
   6944 	rx_done = 0;
   6945 	for (i = 0; i < sc->sc_nqueues; i++) {
   6946 #ifdef WM_EVENT_COUNTERS
   6947 		const char *xname;
   6948 #endif
   6949 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6950 		rxq->rxq_sc = sc;
   6951 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6952 
   6953 		error = wm_alloc_rx_descs(sc, rxq);
   6954 		if (error)
   6955 			break;
   6956 
   6957 		error = wm_alloc_rx_buffer(sc, rxq);
   6958 		if (error) {
   6959 			wm_free_rx_descs(sc, rxq);
   6960 			break;
   6961 		}
   6962 
   6963 #ifdef WM_EVENT_COUNTERS
   6964 		xname = device_xname(sc->sc_dev);
   6965 
   6966 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6967 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6968 
   6969 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6970 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6971 #endif /* WM_EVENT_COUNTERS */
   6972 
   6973 		rx_done++;
   6974 	}
   6975 	if (error)
   6976 		goto fail_2;
   6977 
   6978 	for (i = 0; i < sc->sc_nqueues; i++) {
   6979 		char rndname[16];
   6980 
   6981 		snprintf(rndname, sizeof(rndname), "%sTXRX%d",
   6982 		    device_xname(sc->sc_dev), i);
   6983 		rnd_attach_source(&sc->sc_queue[i].rnd_source, rndname,
   6984 		    RND_TYPE_NET, RND_FLAG_DEFAULT);
   6985 	}
   6986 
   6987 	return 0;
   6988 
   6989  fail_2:
   6990 	for (i = 0; i < rx_done; i++) {
   6991 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6992 		wm_free_rx_buffer(sc, rxq);
   6993 		wm_free_rx_descs(sc, rxq);
   6994 		if (rxq->rxq_lock)
   6995 			mutex_obj_free(rxq->rxq_lock);
   6996 	}
   6997  fail_1:
   6998 	for (i = 0; i < tx_done; i++) {
   6999 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7000 		pcq_destroy(txq->txq_interq);
   7001 		wm_free_tx_buffer(sc, txq);
   7002 		wm_free_tx_descs(sc, txq);
   7003 		if (txq->txq_lock)
   7004 			mutex_obj_free(txq->txq_lock);
   7005 	}
   7006 
   7007 	kmem_free(sc->sc_queue,
   7008 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   7009  fail_0:
   7010 	return error;
   7011 }
   7012 
   7013 /*
   7014  * wm_free_txrx_queues:
   7015  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   7016  */
   7017 static void
   7018 wm_free_txrx_queues(struct wm_softc *sc)
   7019 {
   7020 	int i;
   7021 
   7022 	for (i = 0; i < sc->sc_nqueues; i++)
   7023 		rnd_detach_source(&sc->sc_queue[i].rnd_source);
   7024 
   7025 	for (i = 0; i < sc->sc_nqueues; i++) {
   7026 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   7027 
   7028 #ifdef WM_EVENT_COUNTERS
   7029 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   7030 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   7031 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   7032 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   7033 #endif /* WM_EVENT_COUNTERS */
   7034 
   7035 		wm_free_rx_buffer(sc, rxq);
   7036 		wm_free_rx_descs(sc, rxq);
   7037 		if (rxq->rxq_lock)
   7038 			mutex_obj_free(rxq->rxq_lock);
   7039 	}
   7040 
   7041 	for (i = 0; i < sc->sc_nqueues; i++) {
   7042 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   7043 		struct mbuf *m;
   7044 #ifdef WM_EVENT_COUNTERS
   7045 		int j;
   7046 
   7047 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   7048 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   7049 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   7050 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   7051 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   7052 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   7053 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   7054 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   7055 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   7056 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   7057 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   7058 
   7059 		for (j = 0; j < WM_NTXSEGS; j++)
   7060 			evcnt_detach(&txq->txq_ev_txseg[j]);
   7061 
   7062 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   7063 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   7064 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   7065 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   7066 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   7067 #endif /* WM_EVENT_COUNTERS */
   7068 
   7069 		/* Drain txq_interq */
   7070 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   7071 			m_freem(m);
   7072 		pcq_destroy(txq->txq_interq);
   7073 
   7074 		wm_free_tx_buffer(sc, txq);
   7075 		wm_free_tx_descs(sc, txq);
   7076 		if (txq->txq_lock)
   7077 			mutex_obj_free(txq->txq_lock);
   7078 	}
   7079 
   7080 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   7081 }
   7082 
   7083 static void
   7084 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7085 {
   7086 
   7087 	KASSERT(mutex_owned(txq->txq_lock));
   7088 
   7089 	/* Initialize the transmit descriptor ring. */
   7090 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   7091 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   7092 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7093 	txq->txq_free = WM_NTXDESC(txq);
   7094 	txq->txq_next = 0;
   7095 }
   7096 
   7097 static void
   7098 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7099     struct wm_txqueue *txq)
   7100 {
   7101 
   7102 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7103 		device_xname(sc->sc_dev), __func__));
   7104 	KASSERT(mutex_owned(txq->txq_lock));
   7105 
   7106 	if (sc->sc_type < WM_T_82543) {
   7107 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   7108 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   7109 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   7110 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   7111 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   7112 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   7113 	} else {
   7114 		int qid = wmq->wmq_id;
   7115 
   7116 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   7117 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   7118 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   7119 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   7120 
   7121 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7122 			/*
   7123 			 * Don't write TDT before TCTL.EN is set.
   7124 			 * See the documentation.
   7125 			 */
   7126 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   7127 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   7128 			    | TXDCTL_WTHRESH(0));
   7129 		else {
   7130 			/* XXX should update with AIM? */
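        			/*
        			 * wmq_itr is kept in ITR units (256 ns);
        			 * TIDV/TADV count in 1.024 us units, hence
        			 * the division by 4 (assumed unit conversion;
        			 * see the register definitions).
        			 */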
   7131 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   7132 			if (sc->sc_type >= WM_T_82540) {
   7133 				/* Should be the same */
   7134 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   7135 			}
   7136 
   7137 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   7138 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   7139 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   7140 		}
   7141 	}
   7142 }
   7143 
   7144 static void
   7145 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   7146 {
   7147 	int i;
   7148 
   7149 	KASSERT(mutex_owned(txq->txq_lock));
   7150 
   7151 	/* Initialize the transmit job descriptors. */
   7152 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   7153 		txq->txq_soft[i].txs_mbuf = NULL;
   7154 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   7155 	txq->txq_snext = 0;
   7156 	txq->txq_sdirty = 0;
   7157 }
   7158 
   7159 static void
   7160 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7161     struct wm_txqueue *txq)
   7162 {
   7163 
   7164 	KASSERT(mutex_owned(txq->txq_lock));
   7165 
   7166 	/*
   7167 	 * Set up some register offsets that are different between
   7168 	 * the i82542 and the i82543 and later chips.
   7169 	 */
   7170 	if (sc->sc_type < WM_T_82543)
   7171 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   7172 	else
   7173 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7174 
   7175 	wm_init_tx_descs(sc, txq);
   7176 	wm_init_tx_regs(sc, wmq, txq);
   7177 	wm_init_tx_buffer(sc, txq);
   7178 
   7179 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7180 	txq->txq_sending = false;
   7181 }
   7182 
   7183 static void
   7184 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7185     struct wm_rxqueue *rxq)
   7186 {
   7187 
   7188 	KASSERT(mutex_owned(rxq->rxq_lock));
   7189 
   7190 	/*
   7191 	 * Initialize the receive descriptor and receive job
   7192 	 * descriptor rings.
   7193 	 */
   7194 	if (sc->sc_type < WM_T_82543) {
   7195 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7196 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7197 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7198 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7199 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7200 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7201 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7202 
   7203 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7204 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7205 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7206 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7207 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7208 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7209 	} else {
   7210 		int qid = wmq->wmq_id;
   7211 
   7212 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7213 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7214 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7215 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7216 
   7217 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
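        			/*
        			 * SRRCTL's packet buffer size field counts in
        			 * 1KB units (SRRCTL_BSIZEPKT_SHIFT), so MCLBYTES
        			 * must be a multiple of 1KB here.
        			 */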
   7218 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7219 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7220 
   7221 			/* Currently support only SRRCTL_DESCTYPE_ADV_ONEBUF. */
   7222 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7223 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7224 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7225 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7226 			    | RXDCTL_WTHRESH(1));
   7227 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7228 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7229 		} else {
   7230 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7231 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7232 			/* XXX should update with AIM? */
   7233 			CSR_WRITE(sc, WMREG_RDTR,
   7234 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7235 			/* MUST be the same */
   7236 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7237 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7238 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7239 		}
   7240 	}
   7241 }
   7242 
   7243 static int
   7244 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7245 {
   7246 	struct wm_rxsoft *rxs;
   7247 	int error, i;
   7248 
   7249 	KASSERT(mutex_owned(rxq->rxq_lock));
   7250 
   7251 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7252 		rxs = &rxq->rxq_soft[i];
   7253 		if (rxs->rxs_mbuf == NULL) {
   7254 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7255 				log(LOG_ERR, "%s: unable to allocate or map "
   7256 				    "rx buffer %d, error = %d\n",
   7257 				    device_xname(sc->sc_dev), i, error);
   7258 				/*
   7259 				 * XXX Should attempt to run with fewer receive
   7260 				 * XXX buffers instead of just failing.
   7261 				 */
   7262 				wm_rxdrain(rxq);
   7263 				return ENOMEM;
   7264 			}
   7265 		} else {
   7266 			/*
   7267 			 * For 82575 and 82576, the RX descriptors must be
   7268 			 * initialized after the setting of RCTL.EN in
   7269 			 * wm_set_filter()
   7270 			 */
   7271 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7272 				wm_init_rxdesc(rxq, i);
   7273 		}
   7274 	}
   7275 	rxq->rxq_ptr = 0;
   7276 	rxq->rxq_discard = 0;
   7277 	WM_RXCHAIN_RESET(rxq);
   7278 
   7279 	return 0;
   7280 }
   7281 
   7282 static int
   7283 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7284     struct wm_rxqueue *rxq)
   7285 {
   7286 
   7287 	KASSERT(mutex_owned(rxq->rxq_lock));
   7288 
   7289 	/*
   7290 	 * Set up some register offsets that are different between
   7291 	 * the i82542 and the i82543 and later chips.
   7292 	 */
   7293 	if (sc->sc_type < WM_T_82543)
   7294 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7295 	else
   7296 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7297 
   7298 	wm_init_rx_regs(sc, wmq, rxq);
   7299 	return wm_init_rx_buffer(sc, rxq);
   7300 }
   7301 
   7302 /*
   7303  * wm_init_txrx_queues:
   7304  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7305  */
   7306 static int
   7307 wm_init_txrx_queues(struct wm_softc *sc)
   7308 {
   7309 	int i, error = 0;
   7310 
   7311 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7312 		device_xname(sc->sc_dev), __func__));
   7313 
   7314 	for (i = 0; i < sc->sc_nqueues; i++) {
   7315 		struct wm_queue *wmq = &sc->sc_queue[i];
   7316 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7317 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7318 
   7319 		/*
   7320 		 * TODO
   7321 		 * Currently, we use a constant value instead of AIM.
   7322 		 * Furthermore, for multiqueue, which uses polling mode,
   7323 		 * the interrupt interval is set lower than the default.
   7324 		 * More tuning and AIM are required.
   7325 		 */
   7326 		if (wm_is_using_multiqueue(sc))
   7327 			wmq->wmq_itr = 50;
   7328 		else
   7329 			wmq->wmq_itr = sc->sc_itr_init;
   7330 		wmq->wmq_set_itr = true;
   7331 
   7332 		mutex_enter(txq->txq_lock);
   7333 		wm_init_tx_queue(sc, wmq, txq);
   7334 		mutex_exit(txq->txq_lock);
   7335 
   7336 		mutex_enter(rxq->rxq_lock);
   7337 		error = wm_init_rx_queue(sc, wmq, rxq);
   7338 		mutex_exit(rxq->rxq_lock);
   7339 		if (error)
   7340 			break;
   7341 	}
   7342 
   7343 	return error;
   7344 }
   7345 
   7346 /*
   7347  * wm_tx_offload:
   7348  *
   7349  *	Set up TCP/IP checksumming parameters for the
   7350  *	specified packet.
   7351  */
   7352 static int
   7353 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7354     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7355 {
   7356 	struct mbuf *m0 = txs->txs_mbuf;
   7357 	struct livengood_tcpip_ctxdesc *t;
   7358 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7359 	uint32_t ipcse;
   7360 	struct ether_header *eh;
   7361 	int offset, iphl;
   7362 	uint8_t fields;
   7363 
   7364 	/*
   7365 	 * XXX It would be nice if the mbuf pkthdr had offset
   7366 	 * fields for the protocol headers.
   7367 	 */
   7368 
   7369 	eh = mtod(m0, struct ether_header *);
   7370 	switch (htons(eh->ether_type)) {
   7371 	case ETHERTYPE_IP:
   7372 	case ETHERTYPE_IPV6:
   7373 		offset = ETHER_HDR_LEN;
   7374 		break;
   7375 
   7376 	case ETHERTYPE_VLAN:
   7377 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7378 		break;
   7379 
   7380 	default:
   7381 		/* Don't support this protocol or encapsulation. */
   7382 		*fieldsp = 0;
   7383 		*cmdp = 0;
   7384 		return 0;
   7385 	}
   7386 
   7387 	if ((m0->m_pkthdr.csum_flags &
   7388 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7389 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7390 	} else
   7391 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7392 
   7393 	ipcse = offset + iphl - 1;
   7394 
   7395 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7396 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7397 	seg = 0;
   7398 	fields = 0;
   7399 
   7400 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7401 		int hlen = offset + iphl;
   7402 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7403 
   7404 		if (__predict_false(m0->m_len <
   7405 				    (hlen + sizeof(struct tcphdr)))) {
   7406 			/*
   7407 			 * TCP/IP headers are not in the first mbuf; we need
   7408 			 * to do this the slow and painful way. Let's just
   7409 			 * hope this doesn't happen very often.
   7410 			 */
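        			/*
        			 * For hardware TSO, zero the IP length field
        			 * and seed the TCP checksum with the
        			 * pseudo-header sum (without the length); the
        			 * chip fills in both for each segment.
        			 */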
   7411 			struct tcphdr th;
   7412 
   7413 			WM_Q_EVCNT_INCR(txq, tsopain);
   7414 
   7415 			m_copydata(m0, hlen, sizeof(th), &th);
   7416 			if (v4) {
   7417 				struct ip ip;
   7418 
   7419 				m_copydata(m0, offset, sizeof(ip), &ip);
   7420 				ip.ip_len = 0;
   7421 				m_copyback(m0,
   7422 				    offset + offsetof(struct ip, ip_len),
   7423 				    sizeof(ip.ip_len), &ip.ip_len);
   7424 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7425 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7426 			} else {
   7427 				struct ip6_hdr ip6;
   7428 
   7429 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7430 				ip6.ip6_plen = 0;
   7431 				m_copyback(m0,
   7432 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7433 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7434 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7435 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7436 			}
   7437 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7438 			    sizeof(th.th_sum), &th.th_sum);
   7439 
   7440 			hlen += th.th_off << 2;
   7441 		} else {
   7442 			/*
   7443 			 * TCP/IP headers are in the first mbuf; we can do
   7444 			 * this the easy way.
   7445 			 */
   7446 			struct tcphdr *th;
   7447 
   7448 			if (v4) {
   7449 				struct ip *ip =
   7450 				    (void *)(mtod(m0, char *) + offset);
   7451 				th = (void *)(mtod(m0, char *) + hlen);
   7452 
   7453 				ip->ip_len = 0;
   7454 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7455 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7456 			} else {
   7457 				struct ip6_hdr *ip6 =
   7458 				    (void *)(mtod(m0, char *) + offset);
   7459 				th = (void *)(mtod(m0, char *) + hlen);
   7460 
   7461 				ip6->ip6_plen = 0;
   7462 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7463 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7464 			}
   7465 			hlen += th->th_off << 2;
   7466 		}
   7467 
   7468 		if (v4) {
   7469 			WM_Q_EVCNT_INCR(txq, tso);
   7470 			cmdlen |= WTX_TCPIP_CMD_IP;
   7471 		} else {
   7472 			WM_Q_EVCNT_INCR(txq, tso6);
   7473 			ipcse = 0;
   7474 		}
   7475 		cmd |= WTX_TCPIP_CMD_TSE;
   7476 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7477 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7478 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7479 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7480 	}
   7481 
   7482 	/*
   7483 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7484 	 * offload feature, if we load the context descriptor, we
   7485 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7486 	 */
   7487 
   7488 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7489 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7490 	    WTX_TCPIP_IPCSE(ipcse);
   7491 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7492 		WM_Q_EVCNT_INCR(txq, ipsum);
   7493 		fields |= WTX_IXSM;
   7494 	}
   7495 
   7496 	offset += iphl;
   7497 
   7498 	if (m0->m_pkthdr.csum_flags &
   7499 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7500 		WM_Q_EVCNT_INCR(txq, tusum);
   7501 		fields |= WTX_TXSM;
   7502 		tucs = WTX_TCPIP_TUCSS(offset) |
   7503 		    WTX_TCPIP_TUCSO(offset +
   7504 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7505 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7506 	} else if ((m0->m_pkthdr.csum_flags &
   7507 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7508 		WM_Q_EVCNT_INCR(txq, tusum6);
   7509 		fields |= WTX_TXSM;
   7510 		tucs = WTX_TCPIP_TUCSS(offset) |
   7511 		    WTX_TCPIP_TUCSO(offset +
   7512 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7513 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7514 	} else {
   7515 		/* Just initialize it to a valid TCP context. */
   7516 		tucs = WTX_TCPIP_TUCSS(offset) |
   7517 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7518 		    WTX_TCPIP_TUCSE(0) /* Rest of packet */;
   7519 	}
   7520 
   7521 	/*
   7522 	 * We don't have to write a context descriptor for every packet,
   7523 	 * except on the 82574: there we must write a context descriptor
   7524 	 * for every packet when we use two descriptor queues.
   7525 	 * Writing a context descriptor for every packet adds overhead,
   7526 	 * but it does not cause problems.
   7527 	 */
   7528 	/* Fill in the context descriptor. */
   7529 	t = (struct livengood_tcpip_ctxdesc *)
   7530 	    &txq->txq_descs[txq->txq_next];
   7531 	t->tcpip_ipcs = htole32(ipcs);
   7532 	t->tcpip_tucs = htole32(tucs);
   7533 	t->tcpip_cmdlen = htole32(cmdlen);
   7534 	t->tcpip_seg = htole32(seg);
   7535 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7536 
   7537 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7538 	txs->txs_ndesc++;
   7539 
   7540 	*cmdp = cmd;
   7541 	*fieldsp = fields;
   7542 
   7543 	return 0;
   7544 }
   7545 
   7546 static inline int
   7547 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7548 {
   7549 	struct wm_softc *sc = ifp->if_softc;
   7550 	u_int cpuid = cpu_index(curcpu());
   7551 
   7552 	/*
   7553 	 * Currently, a simple distribution strategy.
   7554 	 * TODO:
   7555 	 * distribute by flowid (RSS hash value).
   7556 	 */
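        	/*
        	 * For example, with ncpu = 4, sc_affinity_offset = 1 and two
        	 * queues, CPUs 1, 2, 3 and 0 map to queues 0, 1, 0 and 1.
        	 */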
   7557 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7558 }
   7559 
   7560 /*
   7561  * wm_start:		[ifnet interface function]
   7562  *
   7563  *	Start packet transmission on the interface.
   7564  */
   7565 static void
   7566 wm_start(struct ifnet *ifp)
   7567 {
   7568 	struct wm_softc *sc = ifp->if_softc;
   7569 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7570 
   7571 #ifdef WM_MPSAFE
   7572 	KASSERT(if_is_mpsafe(ifp));
   7573 #endif
   7574 	/*
   7575 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   7576 	 */
   7577 
   7578 	mutex_enter(txq->txq_lock);
   7579 	if (!txq->txq_stopping)
   7580 		wm_start_locked(ifp);
   7581 	mutex_exit(txq->txq_lock);
   7582 }
   7583 
   7584 static void
   7585 wm_start_locked(struct ifnet *ifp)
   7586 {
   7587 	struct wm_softc *sc = ifp->if_softc;
   7588 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7589 
   7590 	wm_send_common_locked(ifp, txq, false);
   7591 }
   7592 
   7593 static int
   7594 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7595 {
   7596 	int qid;
   7597 	struct wm_softc *sc = ifp->if_softc;
   7598 	struct wm_txqueue *txq;
   7599 
   7600 	qid = wm_select_txqueue(ifp, m);
   7601 	txq = &sc->sc_queue[qid].wmq_txq;
   7602 
   7603 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7604 		m_freem(m);
   7605 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7606 		return ENOBUFS;
   7607 	}
   7608 
   7609 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   7610 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   7611 	if (m->m_flags & M_MCAST)
   7612 		if_statinc_ref(nsr, if_omcasts);
   7613 	IF_STAT_PUTREF(ifp);
   7614 
   7615 	if (mutex_tryenter(txq->txq_lock)) {
   7616 		if (!txq->txq_stopping)
   7617 			wm_transmit_locked(ifp, txq);
   7618 		mutex_exit(txq->txq_lock);
   7619 	}
   7620 
   7621 	return 0;
   7622 }
   7623 
   7624 static void
   7625 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7626 {
   7627 
   7628 	wm_send_common_locked(ifp, txq, true);
   7629 }
   7630 
   7631 static void
   7632 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7633     bool is_transmit)
   7634 {
   7635 	struct wm_softc *sc = ifp->if_softc;
   7636 	struct mbuf *m0;
   7637 	struct wm_txsoft *txs;
   7638 	bus_dmamap_t dmamap;
   7639 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7640 	bus_addr_t curaddr;
   7641 	bus_size_t seglen, curlen;
   7642 	uint32_t cksumcmd;
   7643 	uint8_t cksumfields;
   7644 	bool remap = true;
   7645 
   7646 	KASSERT(mutex_owned(txq->txq_lock));
   7647 
   7648 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7649 		return;
   7650 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7651 		return;
   7652 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7653 		return;
   7654 
   7655 	/* Remember the previous number of free descriptors. */
   7656 	ofree = txq->txq_free;
   7657 
   7658 	/*
   7659 	 * Loop through the send queue, setting up transmit descriptors
   7660 	 * until we drain the queue, or use up all available transmit
   7661 	 * descriptors.
   7662 	 */
   7663 	for (;;) {
   7664 		m0 = NULL;
   7665 
   7666 		/* Get a work queue entry. */
   7667 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7668 			wm_txeof(txq, UINT_MAX);
   7669 			if (txq->txq_sfree == 0) {
   7670 				DPRINTF(WM_DEBUG_TX,
   7671 				    ("%s: TX: no free job descriptors\n",
   7672 					device_xname(sc->sc_dev)));
   7673 				WM_Q_EVCNT_INCR(txq, txsstall);
   7674 				break;
   7675 			}
   7676 		}
   7677 
   7678 		/* Grab a packet off the queue. */
   7679 		if (is_transmit)
   7680 			m0 = pcq_get(txq->txq_interq);
   7681 		else
   7682 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7683 		if (m0 == NULL)
   7684 			break;
   7685 
   7686 		DPRINTF(WM_DEBUG_TX,
   7687 		    ("%s: TX: have packet to transmit: %p\n",
   7688 			device_xname(sc->sc_dev), m0));
   7689 
   7690 		txs = &txq->txq_soft[txq->txq_snext];
   7691 		dmamap = txs->txs_dmamap;
   7692 
   7693 		use_tso = (m0->m_pkthdr.csum_flags &
   7694 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7695 
   7696 		/*
   7697 		 * So says the Linux driver:
   7698 		 * The controller does a simple calculation to make sure
   7699 		 * there is enough room in the FIFO before initiating the
   7700 		 * DMA for each buffer. The calc is:
   7701 		 *	4 = ceil(buffer len / MSS)
   7702 		 * To make sure we don't overrun the FIFO, adjust the max
   7703 		 * buffer len if the MSS drops.
   7704 		 */
   7705 		dmamap->dm_maxsegsz =
   7706 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7707 		    ? m0->m_pkthdr.segsz << 2
   7708 		    : WTX_MAX_LEN;
   7709 
   7710 		/*
   7711 		 * Load the DMA map.  If this fails, the packet either
   7712 		 * didn't fit in the allotted number of segments, or we
   7713 		 * were short on resources.  For the too-many-segments
   7714 		 * case, we simply report an error and drop the packet,
   7715 		 * since we can't sanely copy a jumbo packet to a single
   7716 		 * buffer.
   7717 		 */
   7718 retry:
   7719 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7720 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7721 		if (__predict_false(error)) {
   7722 			if (error == EFBIG) {
   7723 				if (remap == true) {
   7724 					struct mbuf *m;
   7725 
   7726 					remap = false;
   7727 					m = m_defrag(m0, M_NOWAIT);
   7728 					if (m != NULL) {
   7729 						WM_Q_EVCNT_INCR(txq, defrag);
   7730 						m0 = m;
   7731 						goto retry;
   7732 					}
   7733 				}
   7734 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7735 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7736 				    "DMA segments, dropping...\n",
   7737 				    device_xname(sc->sc_dev));
   7738 				wm_dump_mbuf_chain(sc, m0);
   7739 				m_freem(m0);
   7740 				continue;
   7741 			}
   7742 			/* Short on resources, just stop for now. */
   7743 			DPRINTF(WM_DEBUG_TX,
   7744 			    ("%s: TX: dmamap load failed: %d\n",
   7745 				device_xname(sc->sc_dev), error));
   7746 			break;
   7747 		}
   7748 
   7749 		segs_needed = dmamap->dm_nsegs;
   7750 		if (use_tso) {
   7751 			/* For sentinel descriptor; see below. */
   7752 			segs_needed++;
   7753 		}
   7754 
   7755 		/*
   7756 		 * Ensure we have enough descriptors free to describe
   7757 		 * the packet. Note, we always reserve one descriptor
   7758 		 * at the end of the ring due to the semantics of the
   7759 		 * TDT register, plus one more in the event we need
   7760 		 * to load offload context.
   7761 		 */
   7762 		if (segs_needed > txq->txq_free - 2) {
   7763 			/*
   7764 			 * Not enough free descriptors to transmit this
   7765 			 * packet.  We haven't committed anything yet,
   7766 			 * so just unload the DMA map, put the packet
   7767 			 * back on the queue, and punt. Notify the upper
   7768 			 * layer that there are no more slots left.
   7769 			 */
   7770 			DPRINTF(WM_DEBUG_TX,
   7771 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7772 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7773 				segs_needed, txq->txq_free - 1));
   7774 			if (!is_transmit)
   7775 				ifp->if_flags |= IFF_OACTIVE;
   7776 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7777 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7778 			WM_Q_EVCNT_INCR(txq, txdstall);
   7779 			break;
   7780 		}
   7781 
   7782 		/*
   7783 		 * Check for 82547 Tx FIFO bug. We need to do this
   7784 		 * once we know we can transmit the packet, since we
   7785 		 * do some internal FIFO space accounting here.
   7786 		 */
   7787 		if (sc->sc_type == WM_T_82547 &&
   7788 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7789 			DPRINTF(WM_DEBUG_TX,
   7790 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7791 				device_xname(sc->sc_dev)));
   7792 			if (!is_transmit)
   7793 				ifp->if_flags |= IFF_OACTIVE;
   7794 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7795 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7796 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7797 			break;
   7798 		}
   7799 
   7800 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7801 
   7802 		DPRINTF(WM_DEBUG_TX,
   7803 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7804 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7805 
   7806 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7807 
   7808 		/*
   7809 		 * Store a pointer to the packet so that we can free it
   7810 		 * later.
   7811 		 *
   7812 		 * Initially, we consider the number of descriptors the
   7813 		 * packet uses to be the number of DMA segments.  This may be
   7814 		 * incremented by 1 if we do checksum offload (a descriptor
   7815 		 * is used to set the checksum context).
   7816 		 */
   7817 		txs->txs_mbuf = m0;
   7818 		txs->txs_firstdesc = txq->txq_next;
   7819 		txs->txs_ndesc = segs_needed;
   7820 
   7821 		/* Set up offload parameters for this packet. */
   7822 		if (m0->m_pkthdr.csum_flags &
   7823 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7824 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7825 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7826 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7827 					  &cksumfields) != 0) {
   7828 				/* Error message already displayed. */
   7829 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7830 				continue;
   7831 			}
   7832 		} else {
   7833 			cksumcmd = 0;
   7834 			cksumfields = 0;
   7835 		}
   7836 
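		/*
		 * Every descriptor for this packet requests FCS
		 * insertion (IFCS) and uses the Tx interrupt delay
		 * (IDE).
		 */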
   7837 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7838 
   7839 		/* Sync the DMA map. */
   7840 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7841 		    BUS_DMASYNC_PREWRITE);
   7842 
   7843 		/* Initialize the transmit descriptor. */
   7844 		for (nexttx = txq->txq_next, seg = 0;
   7845 		     seg < dmamap->dm_nsegs; seg++) {
   7846 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7847 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7848 			     seglen != 0;
   7849 			     curaddr += curlen, seglen -= curlen,
   7850 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7851 				curlen = seglen;
   7852 
				/*
				 * So says the Linux driver:
				 * Workaround for premature descriptor
				 * write-backs in TSO mode: append a
				 * 4-byte sentinel descriptor.
				 */
   7859 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7860 				    curlen > 8)
   7861 					curlen -= 4;
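				/*
				 * Trimming 4 bytes here leaves a 4-byte
				 * remainder in seglen, so the loop emits
				 * one extra descriptor: the sentinel
				 * counted in segs_needed above.
				 */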
   7862 
   7863 				wm_set_dma_addr(
   7864 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7865 				txq->txq_descs[nexttx].wtx_cmdlen
   7866 				    = htole32(cksumcmd | curlen);
   7867 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7868 				    = 0;
   7869 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7870 				    = cksumfields;
   7871 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7872 				lasttx = nexttx;
   7873 
   7874 				DPRINTF(WM_DEBUG_TX,
   7875 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7876 					"len %#04zx\n",
   7877 					device_xname(sc->sc_dev), nexttx,
   7878 					(uint64_t)curaddr, curlen));
   7879 			}
   7880 		}
   7881 
   7882 		KASSERT(lasttx != -1);
   7883 
   7884 		/*
   7885 		 * Set up the command byte on the last descriptor of
   7886 		 * the packet. If we're in the interrupt delay window,
   7887 		 * delay the interrupt.
   7888 		 */
   7889 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7890 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7891 
   7892 		/*
   7893 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7894 		 * up the descriptor to encapsulate the packet for us.
   7895 		 *
   7896 		 * This is only valid on the last descriptor of the packet.
   7897 		 */
   7898 		if (vlan_has_tag(m0)) {
   7899 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7900 			    htole32(WTX_CMD_VLE);
   7901 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7902 			    = htole16(vlan_get_tag(m0));
   7903 		}
   7904 
   7905 		txs->txs_lastdesc = lasttx;
   7906 
   7907 		DPRINTF(WM_DEBUG_TX,
   7908 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7909 			device_xname(sc->sc_dev),
   7910 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7911 
   7912 		/* Sync the descriptors we're using. */
   7913 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7914 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7915 
   7916 		/* Give the packet to the chip. */
   7917 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7918 
   7919 		DPRINTF(WM_DEBUG_TX,
   7920 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7921 
   7922 		DPRINTF(WM_DEBUG_TX,
   7923 		    ("%s: TX: finished transmitting packet, job %d\n",
   7924 			device_xname(sc->sc_dev), txq->txq_snext));
   7925 
   7926 		/* Advance the tx pointer. */
   7927 		txq->txq_free -= txs->txs_ndesc;
   7928 		txq->txq_next = nexttx;
   7929 
   7930 		txq->txq_sfree--;
   7931 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7932 
   7933 		/* Pass the packet to any BPF listeners. */
   7934 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7935 	}
   7936 
   7937 	if (m0 != NULL) {
   7938 		if (!is_transmit)
   7939 			ifp->if_flags |= IFF_OACTIVE;
   7940 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7941 		WM_Q_EVCNT_INCR(txq, descdrop);
   7942 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7943 			__func__));
   7944 		m_freem(m0);
   7945 	}
   7946 
   7947 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7948 		/* No more slots; notify upper layer. */
   7949 		if (!is_transmit)
   7950 			ifp->if_flags |= IFF_OACTIVE;
   7951 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7952 	}
   7953 
   7954 	if (txq->txq_free != ofree) {
   7955 		/* Set a watchdog timer in case the chip flakes out. */
   7956 		txq->txq_lastsent = time_uptime;
   7957 		txq->txq_sending = true;
   7958 	}
   7959 }
   7960 
   7961 /*
   7962  * wm_nq_tx_offload:
   7963  *
   7964  *	Set up TCP/IP checksumming parameters for the
   7965  *	specified packet, for NEWQUEUE devices
   7966  */
   7967 static int
   7968 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7969     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7970 {
   7971 	struct mbuf *m0 = txs->txs_mbuf;
   7972 	uint32_t vl_len, mssidx, cmdc;
   7973 	struct ether_header *eh;
   7974 	int offset, iphl;
   7975 
   7976 	/*
   7977 	 * XXX It would be nice if the mbuf pkthdr had offset
   7978 	 * fields for the protocol headers.
   7979 	 */
   7980 	*cmdlenp = 0;
   7981 	*fieldsp = 0;
   7982 
   7983 	eh = mtod(m0, struct ether_header *);
   7984 	switch (htons(eh->ether_type)) {
   7985 	case ETHERTYPE_IP:
   7986 	case ETHERTYPE_IPV6:
   7987 		offset = ETHER_HDR_LEN;
   7988 		break;
   7989 
   7990 	case ETHERTYPE_VLAN:
   7991 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7992 		break;
   7993 
   7994 	default:
   7995 		/* Don't support this protocol or encapsulation. */
   7996 		*do_csum = false;
   7997 		return 0;
   7998 	}
   7999 	*do_csum = true;
   8000 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   8001 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   8002 
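	/*
	 * The context descriptor's vl_len word packs the VLAN tag, the
	 * MAC header length and the IP header length into one 32-bit
	 * field; the KASSERTs check that each value fits in its field.
	 */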
   8003 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   8004 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   8005 
   8006 	if ((m0->m_pkthdr.csum_flags &
   8007 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   8008 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   8009 	} else {
   8010 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   8011 	}
   8012 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   8013 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   8014 
   8015 	if (vlan_has_tag(m0)) {
   8016 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   8017 		    << NQTXC_VLLEN_VLAN_SHIFT);
   8018 		*cmdlenp |= NQTX_CMD_VLE;
   8019 	}
   8020 
   8021 	mssidx = 0;
   8022 
   8023 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   8024 		int hlen = offset + iphl;
   8025 		int tcp_hlen;
   8026 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   8027 
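		/*
		 * For TSO the hardware rewrites the IP length field and
		 * the TCP checksum on every segment it emits, so we must
		 * zero the IP length here and seed th_sum with a
		 * pseudo-header checksum that excludes the length.
		 */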
   8028 		if (__predict_false(m0->m_len <
   8029 				    (hlen + sizeof(struct tcphdr)))) {
   8030 			/*
   8031 			 * TCP/IP headers are not in the first mbuf; we need
   8032 			 * to do this the slow and painful way. Let's just
   8033 			 * hope this doesn't happen very often.
   8034 			 */
   8035 			struct tcphdr th;
   8036 
   8037 			WM_Q_EVCNT_INCR(txq, tsopain);
   8038 
   8039 			m_copydata(m0, hlen, sizeof(th), &th);
   8040 			if (v4) {
   8041 				struct ip ip;
   8042 
   8043 				m_copydata(m0, offset, sizeof(ip), &ip);
   8044 				ip.ip_len = 0;
   8045 				m_copyback(m0,
   8046 				    offset + offsetof(struct ip, ip_len),
   8047 				    sizeof(ip.ip_len), &ip.ip_len);
   8048 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   8049 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   8050 			} else {
   8051 				struct ip6_hdr ip6;
   8052 
   8053 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   8054 				ip6.ip6_plen = 0;
   8055 				m_copyback(m0,
   8056 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   8057 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   8058 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   8059 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   8060 			}
   8061 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   8062 			    sizeof(th.th_sum), &th.th_sum);
   8063 
   8064 			tcp_hlen = th.th_off << 2;
   8065 		} else {
   8066 			/*
   8067 			 * TCP/IP headers are in the first mbuf; we can do
   8068 			 * this the easy way.
   8069 			 */
   8070 			struct tcphdr *th;
   8071 
   8072 			if (v4) {
   8073 				struct ip *ip =
   8074 				    (void *)(mtod(m0, char *) + offset);
   8075 				th = (void *)(mtod(m0, char *) + hlen);
   8076 
   8077 				ip->ip_len = 0;
   8078 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   8079 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   8080 			} else {
   8081 				struct ip6_hdr *ip6 =
   8082 				    (void *)(mtod(m0, char *) + offset);
   8083 				th = (void *)(mtod(m0, char *) + hlen);
   8084 
   8085 				ip6->ip6_plen = 0;
   8086 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   8087 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   8088 			}
   8089 			tcp_hlen = th->th_off << 2;
   8090 		}
   8091 		hlen += tcp_hlen;
   8092 		*cmdlenp |= NQTX_CMD_TSE;
   8093 
   8094 		if (v4) {
   8095 			WM_Q_EVCNT_INCR(txq, tso);
   8096 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   8097 		} else {
   8098 			WM_Q_EVCNT_INCR(txq, tso6);
   8099 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   8100 		}
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
			& ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8103 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   8104 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   8105 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   8106 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   8107 	} else {
   8108 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   8109 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   8110 	}
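	/*
	 * NQTXD_FIELDS_PAYLEN is thus the TCP payload length for TSO
	 * and the whole packet length otherwise.
	 */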
   8111 
   8112 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   8113 		*fieldsp |= NQTXD_FIELDS_IXSM;
   8114 		cmdc |= NQTXC_CMD_IP4;
   8115 	}
   8116 
   8117 	if (m0->m_pkthdr.csum_flags &
   8118 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   8119 		WM_Q_EVCNT_INCR(txq, tusum);
   8120 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   8121 			cmdc |= NQTXC_CMD_TCP;
   8122 		else
   8123 			cmdc |= NQTXC_CMD_UDP;
   8124 
   8125 		cmdc |= NQTXC_CMD_IP4;
   8126 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8127 	}
   8128 	if (m0->m_pkthdr.csum_flags &
   8129 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   8130 		WM_Q_EVCNT_INCR(txq, tusum6);
   8131 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   8132 			cmdc |= NQTXC_CMD_TCP;
   8133 		else
   8134 			cmdc |= NQTXC_CMD_UDP;
   8135 
   8136 		cmdc |= NQTXC_CMD_IP6;
   8137 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   8138 	}
   8139 
	/*
	 * On NEWQUEUE controllers (82575, 82576, 82580, I350, I354,
	 * I210 and I211) we don't have to write a context descriptor
	 * for every packet; writing one per Tx queue would be enough.
	 * Writing one per packet adds overhead, but it does not cause
	 * problems.
	 */
   8148 	/* Fill in the context descriptor. */
   8149 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   8150 	    htole32(vl_len);
   8151 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   8152 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   8153 	    htole32(cmdc);
   8154 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   8155 	    htole32(mssidx);
   8156 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   8157 	DPRINTF(WM_DEBUG_TX,
   8158 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   8159 		txq->txq_next, 0, vl_len));
   8160 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   8161 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   8162 	txs->txs_ndesc++;
   8163 	return 0;
   8164 }
   8165 
   8166 /*
   8167  * wm_nq_start:		[ifnet interface function]
   8168  *
   8169  *	Start packet transmission on the interface for NEWQUEUE devices
   8170  */
   8171 static void
   8172 wm_nq_start(struct ifnet *ifp)
   8173 {
   8174 	struct wm_softc *sc = ifp->if_softc;
   8175 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8176 
   8177 #ifdef WM_MPSAFE
   8178 	KASSERT(if_is_mpsafe(ifp));
   8179 #endif
   8180 	/*
   8181 	 * if_obytes and if_omcasts are added in if_transmit()@if.c.
   8182 	 */
   8183 
   8184 	mutex_enter(txq->txq_lock);
   8185 	if (!txq->txq_stopping)
   8186 		wm_nq_start_locked(ifp);
   8187 	mutex_exit(txq->txq_lock);
   8188 }
   8189 
   8190 static void
   8191 wm_nq_start_locked(struct ifnet *ifp)
   8192 {
   8193 	struct wm_softc *sc = ifp->if_softc;
   8194 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8195 
   8196 	wm_nq_send_common_locked(ifp, txq, false);
   8197 }
   8198 
   8199 static int
   8200 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8201 {
   8202 	int qid;
   8203 	struct wm_softc *sc = ifp->if_softc;
   8204 	struct wm_txqueue *txq;
   8205 
   8206 	qid = wm_select_txqueue(ifp, m);
   8207 	txq = &sc->sc_queue[qid].wmq_txq;
   8208 
   8209 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8210 		m_freem(m);
   8211 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8212 		return ENOBUFS;
   8213 	}
   8214 
   8215 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
   8216 	if_statadd_ref(nsr, if_obytes, m->m_pkthdr.len);
   8217 	if (m->m_flags & M_MCAST)
   8218 		if_statinc_ref(nsr, if_omcasts);
   8219 	IF_STAT_PUTREF(ifp);
   8220 
	/*
	 * There are two situations in which this mutex_tryenter() can
	 * fail at run time:
	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
	 * In case (2), the last packet enqueued to txq->txq_interq is
	 * also dequeued by wm_deferred_start_locked(), so it does not get
	 * stuck either.
	 */
   8232 	if (mutex_tryenter(txq->txq_lock)) {
   8233 		if (!txq->txq_stopping)
   8234 			wm_nq_transmit_locked(ifp, txq);
   8235 		mutex_exit(txq->txq_lock);
   8236 	}
   8237 
   8238 	return 0;
   8239 }
   8240 
   8241 static void
   8242 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8243 {
   8244 
   8245 	wm_nq_send_common_locked(ifp, txq, true);
   8246 }
   8247 
   8248 static void
   8249 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8250     bool is_transmit)
   8251 {
   8252 	struct wm_softc *sc = ifp->if_softc;
   8253 	struct mbuf *m0;
   8254 	struct wm_txsoft *txs;
   8255 	bus_dmamap_t dmamap;
   8256 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8257 	bool do_csum, sent;
   8258 	bool remap = true;
   8259 
   8260 	KASSERT(mutex_owned(txq->txq_lock));
   8261 
   8262 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8263 		return;
   8264 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8265 		return;
   8266 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8267 		return;
   8268 
   8269 	sent = false;
   8270 
   8271 	/*
   8272 	 * Loop through the send queue, setting up transmit descriptors
   8273 	 * until we drain the queue, or use up all available transmit
   8274 	 * descriptors.
   8275 	 */
   8276 	for (;;) {
   8277 		m0 = NULL;
   8278 
   8279 		/* Get a work queue entry. */
   8280 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8281 			wm_txeof(txq, UINT_MAX);
   8282 			if (txq->txq_sfree == 0) {
   8283 				DPRINTF(WM_DEBUG_TX,
   8284 				    ("%s: TX: no free job descriptors\n",
   8285 					device_xname(sc->sc_dev)));
   8286 				WM_Q_EVCNT_INCR(txq, txsstall);
   8287 				break;
   8288 			}
   8289 		}
   8290 
   8291 		/* Grab a packet off the queue. */
   8292 		if (is_transmit)
   8293 			m0 = pcq_get(txq->txq_interq);
   8294 		else
   8295 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8296 		if (m0 == NULL)
   8297 			break;
   8298 
   8299 		DPRINTF(WM_DEBUG_TX,
   8300 		    ("%s: TX: have packet to transmit: %p\n",
   8301 		    device_xname(sc->sc_dev), m0));
   8302 
   8303 		txs = &txq->txq_soft[txq->txq_snext];
   8304 		dmamap = txs->txs_dmamap;
   8305 
   8306 		/*
   8307 		 * Load the DMA map.  If this fails, the packet either
   8308 		 * didn't fit in the allotted number of segments, or we
   8309 		 * were short on resources.  For the too-many-segments
   8310 		 * case, we simply report an error and drop the packet,
   8311 		 * since we can't sanely copy a jumbo packet to a single
   8312 		 * buffer.
   8313 		 */
   8314 retry:
   8315 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8316 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8317 		if (__predict_false(error)) {
   8318 			if (error == EFBIG) {
   8319 				if (remap == true) {
   8320 					struct mbuf *m;
   8321 
   8322 					remap = false;
   8323 					m = m_defrag(m0, M_NOWAIT);
   8324 					if (m != NULL) {
   8325 						WM_Q_EVCNT_INCR(txq, defrag);
   8326 						m0 = m;
   8327 						goto retry;
   8328 					}
   8329 				}
   8330 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8331 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8332 				    "DMA segments, dropping...\n",
   8333 				    device_xname(sc->sc_dev));
   8334 				wm_dump_mbuf_chain(sc, m0);
   8335 				m_freem(m0);
   8336 				continue;
   8337 			}
   8338 			/* Short on resources, just stop for now. */
   8339 			DPRINTF(WM_DEBUG_TX,
   8340 			    ("%s: TX: dmamap load failed: %d\n",
   8341 				device_xname(sc->sc_dev), error));
   8342 			break;
   8343 		}
   8344 
   8345 		segs_needed = dmamap->dm_nsegs;
   8346 
		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet. Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register (the hardware treats head == tail as an
		 * empty ring, so the tail must never fully wrap onto
		 * the head), plus one more in the event we need to
		 * load an offload context.
		 */
   8354 		if (segs_needed > txq->txq_free - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
			 * layer that there are no more slots left.
			 */
   8362 			DPRINTF(WM_DEBUG_TX,
   8363 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8364 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8365 				segs_needed, txq->txq_free - 1));
   8366 			if (!is_transmit)
   8367 				ifp->if_flags |= IFF_OACTIVE;
   8368 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8369 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8370 			WM_Q_EVCNT_INCR(txq, txdstall);
   8371 			break;
   8372 		}
   8373 
   8374 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8375 
   8376 		DPRINTF(WM_DEBUG_TX,
   8377 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8378 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8379 
   8380 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8381 
		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
   8391 		txs->txs_mbuf = m0;
   8392 		txs->txs_firstdesc = txq->txq_next;
   8393 		txs->txs_ndesc = segs_needed;
   8394 
   8395 		/* Set up offload parameters for this packet. */
   8396 		uint32_t cmdlen, fields, dcmdlen;
   8397 		if (m0->m_pkthdr.csum_flags &
   8398 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8399 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8400 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8401 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8402 			    &do_csum) != 0) {
   8403 				/* Error message already displayed. */
   8404 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8405 				continue;
   8406 			}
   8407 		} else {
   8408 			do_csum = false;
   8409 			cmdlen = 0;
   8410 			fields = 0;
   8411 		}
   8412 
   8413 		/* Sync the DMA map. */
   8414 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8415 		    BUS_DMASYNC_PREWRITE);
   8416 
   8417 		/* Initialize the first transmit descriptor. */
   8418 		nexttx = txq->txq_next;
   8419 		if (!do_csum) {
   8420 			/* Setup a legacy descriptor */
   8421 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8422 			    dmamap->dm_segs[0].ds_addr);
   8423 			txq->txq_descs[nexttx].wtx_cmdlen =
   8424 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8425 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8426 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8427 			if (vlan_has_tag(m0)) {
   8428 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8429 				    htole32(WTX_CMD_VLE);
   8430 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8431 				    htole16(vlan_get_tag(m0));
   8432 			} else
   8433 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8434 
   8435 			dcmdlen = 0;
   8436 		} else {
   8437 			/* Setup an advanced data descriptor */
   8438 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8439 			    htole64(dmamap->dm_segs[0].ds_addr);
   8440 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8441 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8442 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8443 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8444 			    htole32(fields);
   8445 			DPRINTF(WM_DEBUG_TX,
   8446 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8447 				device_xname(sc->sc_dev), nexttx,
   8448 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8449 			DPRINTF(WM_DEBUG_TX,
   8450 			    ("\t 0x%08x%08x\n", fields,
   8451 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8452 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8453 		}
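		/*
		 * dcmdlen carries the descriptor-type bits that every
		 * following descriptor must repeat: zero for the legacy
		 * format, NQTX_DTYP_D | NQTX_CMD_DEXT for the advanced
		 * format.
		 */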
   8454 
   8455 		lasttx = nexttx;
   8456 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the remaining descriptors; the legacy and
		 * advanced formats are laid out the same from here on.
		 */
   8461 		for (seg = 1; seg < dmamap->dm_nsegs;
   8462 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8463 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8464 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8465 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8466 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8467 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8468 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8469 			lasttx = nexttx;
   8470 
   8471 			DPRINTF(WM_DEBUG_TX,
   8472 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8473 				device_xname(sc->sc_dev), nexttx,
   8474 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8475 				dmamap->dm_segs[seg].ds_len));
   8476 		}
   8477 
   8478 		KASSERT(lasttx != -1);
   8479 
   8480 		/*
   8481 		 * Set up the command byte on the last descriptor of
   8482 		 * the packet. If we're in the interrupt delay window,
   8483 		 * delay the interrupt.
   8484 		 */
   8485 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8486 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8487 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8488 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8489 
   8490 		txs->txs_lastdesc = lasttx;
   8491 
   8492 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8493 		    device_xname(sc->sc_dev),
   8494 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8495 
   8496 		/* Sync the descriptors we're using. */
   8497 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8498 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8499 
   8500 		/* Give the packet to the chip. */
   8501 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8502 		sent = true;
   8503 
   8504 		DPRINTF(WM_DEBUG_TX,
   8505 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8506 
   8507 		DPRINTF(WM_DEBUG_TX,
   8508 		    ("%s: TX: finished transmitting packet, job %d\n",
   8509 			device_xname(sc->sc_dev), txq->txq_snext));
   8510 
   8511 		/* Advance the tx pointer. */
   8512 		txq->txq_free -= txs->txs_ndesc;
   8513 		txq->txq_next = nexttx;
   8514 
   8515 		txq->txq_sfree--;
   8516 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8517 
   8518 		/* Pass the packet to any BPF listeners. */
   8519 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8520 	}
   8521 
   8522 	if (m0 != NULL) {
   8523 		if (!is_transmit)
   8524 			ifp->if_flags |= IFF_OACTIVE;
   8525 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8526 		WM_Q_EVCNT_INCR(txq, descdrop);
   8527 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8528 			__func__));
   8529 		m_freem(m0);
   8530 	}
   8531 
   8532 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8533 		/* No more slots; notify upper layer. */
   8534 		if (!is_transmit)
   8535 			ifp->if_flags |= IFF_OACTIVE;
   8536 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8537 	}
   8538 
   8539 	if (sent) {
   8540 		/* Set a watchdog timer in case the chip flakes out. */
   8541 		txq->txq_lastsent = time_uptime;
   8542 		txq->txq_sending = true;
   8543 	}
   8544 }
   8545 
   8546 static void
   8547 wm_deferred_start_locked(struct wm_txqueue *txq)
   8548 {
   8549 	struct wm_softc *sc = txq->txq_sc;
   8550 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8551 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8552 	int qid = wmq->wmq_id;
   8553 
   8554 	KASSERT(mutex_owned(txq->txq_lock));
   8555 
	if (txq->txq_stopping)
		return;
   8560 
   8561 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
   8563 		if (qid == 0)
   8564 			wm_nq_start_locked(ifp);
   8565 		wm_nq_transmit_locked(ifp, txq);
   8566 	} else {
		/* XXX needed for ALTQ or single-CPU systems */
   8568 		if (qid == 0)
   8569 			wm_start_locked(ifp);
   8570 		wm_transmit_locked(ifp, txq);
   8571 	}
   8572 }
   8573 
   8574 /* Interrupt */
   8575 
   8576 /*
   8577  * wm_txeof:
   8578  *
   8579  *	Helper; handle transmit interrupts.
   8580  */
   8581 static bool
   8582 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8583 {
   8584 	struct wm_softc *sc = txq->txq_sc;
   8585 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8586 	struct wm_txsoft *txs;
   8587 	int count = 0;
   8588 	int i;
   8589 	uint8_t status;
   8590 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8591 	bool more = false;
   8592 
   8593 	KASSERT(mutex_owned(txq->txq_lock));
   8594 
   8595 	if (txq->txq_stopping)
   8596 		return false;
   8597 
   8598 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8600 	if (wmq->wmq_id == 0)
   8601 		ifp->if_flags &= ~IFF_OACTIVE;
   8602 
   8603 	/*
   8604 	 * Go through the Tx list and free mbufs for those
   8605 	 * frames which have been transmitted.
   8606 	 */
   8607 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8608 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8609 		if (limit-- == 0) {
   8610 			more = true;
   8611 			DPRINTF(WM_DEBUG_TX,
   8612 			    ("%s: TX: loop limited, job %d is not processed\n",
   8613 				device_xname(sc->sc_dev), i));
   8614 			break;
   8615 		}
   8616 
   8617 		txs = &txq->txq_soft[i];
   8618 
   8619 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8620 			device_xname(sc->sc_dev), i));
   8621 
   8622 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8623 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8624 
   8625 		status =
   8626 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
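		/*
		 * If the descriptor-done (DD) bit isn't set yet, hand
		 * the descriptor back to the DMA engine and stop
		 * scanning.
		 */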
   8627 		if ((status & WTX_ST_DD) == 0) {
   8628 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8629 			    BUS_DMASYNC_PREREAD);
   8630 			break;
   8631 		}
   8632 
   8633 		count++;
   8634 		DPRINTF(WM_DEBUG_TX,
   8635 		    ("%s: TX: job %d done: descs %d..%d\n",
   8636 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8637 		    txs->txs_lastdesc));
   8638 
   8639 		/*
   8640 		 * XXX We should probably be using the statistics
   8641 		 * XXX registers, but I don't know if they exist
   8642 		 * XXX on chips before the i82544.
   8643 		 */
   8644 
   8645 #ifdef WM_EVENT_COUNTERS
   8646 		if (status & WTX_ST_TU)
   8647 			WM_Q_EVCNT_INCR(txq, underrun);
   8648 #endif /* WM_EVENT_COUNTERS */
   8649 
		/*
		 * The documents for the 82574 and newer say that the
		 * status field has neither an EC (Excessive Collision)
		 * bit nor an LC (Late Collision) bit; both are reserved.
		 * Refer to the "PCIe GbE Controller Open Source Software
		 * Developer's Manual", the 82574 datasheet and newer
		 * ones.
		 *
		 * XXX I saw the LC bit set on an I218 even though the
		 * media was full duplex, so the bit might be used for
		 * some other meaning ... (I have no documentation).
		 */
   8660 
   8661 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8662 		    && ((sc->sc_type < WM_T_82574)
   8663 			|| (sc->sc_type == WM_T_80003))) {
   8664 			if_statinc(ifp, if_oerrors);
   8665 			if (status & WTX_ST_LC)
   8666 				log(LOG_WARNING, "%s: late collision\n",
   8667 				    device_xname(sc->sc_dev));
   8668 			else if (status & WTX_ST_EC) {
   8669 				if_statadd(ifp, if_collisions,
   8670 				    TX_COLLISION_THRESHOLD + 1);
   8671 				log(LOG_WARNING, "%s: excessive collisions\n",
   8672 				    device_xname(sc->sc_dev));
   8673 			}
   8674 		} else
   8675 			if_statinc(ifp, if_opackets);
   8676 
   8677 		txq->txq_packets++;
   8678 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8679 
   8680 		txq->txq_free += txs->txs_ndesc;
   8681 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8682 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8683 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8684 		m_freem(txs->txs_mbuf);
   8685 		txs->txs_mbuf = NULL;
   8686 	}
   8687 
   8688 	/* Update the dirty transmit buffer pointer. */
   8689 	txq->txq_sdirty = i;
   8690 	DPRINTF(WM_DEBUG_TX,
   8691 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8692 
   8693 	/*
   8694 	 * If there are no more pending transmissions, cancel the watchdog
   8695 	 * timer.
   8696 	 */
   8697 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8698 		txq->txq_sending = false;
   8699 
   8700 	return more;
   8701 }
   8702 
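/*
 * The wm_rxdesc_* helpers below hide the three receive descriptor
 * formats (legacy, 82574 extended and NEWQUEUE advanced) behind a
 * common set of accessors and predicates.
 */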
   8703 static inline uint32_t
   8704 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8705 {
   8706 	struct wm_softc *sc = rxq->rxq_sc;
   8707 
   8708 	if (sc->sc_type == WM_T_82574)
   8709 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8710 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8711 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8712 	else
   8713 		return rxq->rxq_descs[idx].wrx_status;
   8714 }
   8715 
   8716 static inline uint32_t
   8717 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8718 {
   8719 	struct wm_softc *sc = rxq->rxq_sc;
   8720 
   8721 	if (sc->sc_type == WM_T_82574)
   8722 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8723 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8724 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8725 	else
   8726 		return rxq->rxq_descs[idx].wrx_errors;
   8727 }
   8728 
   8729 static inline uint16_t
   8730 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8731 {
   8732 	struct wm_softc *sc = rxq->rxq_sc;
   8733 
   8734 	if (sc->sc_type == WM_T_82574)
   8735 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8736 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8737 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8738 	else
   8739 		return rxq->rxq_descs[idx].wrx_special;
   8740 }
   8741 
   8742 static inline int
   8743 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8744 {
   8745 	struct wm_softc *sc = rxq->rxq_sc;
   8746 
   8747 	if (sc->sc_type == WM_T_82574)
   8748 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8749 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8750 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8751 	else
   8752 		return rxq->rxq_descs[idx].wrx_len;
   8753 }
   8754 
   8755 #ifdef WM_DEBUG
   8756 static inline uint32_t
   8757 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8758 {
   8759 	struct wm_softc *sc = rxq->rxq_sc;
   8760 
   8761 	if (sc->sc_type == WM_T_82574)
   8762 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8763 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8764 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8765 	else
   8766 		return 0;
   8767 }
   8768 
   8769 static inline uint8_t
   8770 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8771 {
   8772 	struct wm_softc *sc = rxq->rxq_sc;
   8773 
   8774 	if (sc->sc_type == WM_T_82574)
   8775 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8776 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8777 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8778 	else
   8779 		return 0;
   8780 }
   8781 #endif /* WM_DEBUG */
   8782 
   8783 static inline bool
   8784 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8785     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8786 {
   8787 
   8788 	if (sc->sc_type == WM_T_82574)
   8789 		return (status & ext_bit) != 0;
   8790 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8791 		return (status & nq_bit) != 0;
   8792 	else
   8793 		return (status & legacy_bit) != 0;
   8794 }
   8795 
   8796 static inline bool
   8797 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8798     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8799 {
   8800 
   8801 	if (sc->sc_type == WM_T_82574)
   8802 		return (error & ext_bit) != 0;
   8803 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8804 		return (error & nq_bit) != 0;
   8805 	else
   8806 		return (error & legacy_bit) != 0;
   8807 }
   8808 
   8809 static inline bool
   8810 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8811 {
   8812 
   8813 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8814 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8815 		return true;
   8816 	else
   8817 		return false;
   8818 }
   8819 
   8820 static inline bool
   8821 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8822 {
   8823 	struct wm_softc *sc = rxq->rxq_sc;
   8824 
   8825 	/* XXX missing error bit for newqueue? */
   8826 	if (wm_rxdesc_is_set_error(sc, errors,
   8827 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8828 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8829 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8830 		NQRXC_ERROR_RXE)) {
   8831 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8832 		    EXTRXC_ERROR_SE, 0))
   8833 			log(LOG_WARNING, "%s: symbol error\n",
   8834 			    device_xname(sc->sc_dev));
   8835 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8836 		    EXTRXC_ERROR_SEQ, 0))
   8837 			log(LOG_WARNING, "%s: receive sequence error\n",
   8838 			    device_xname(sc->sc_dev));
   8839 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8840 		    EXTRXC_ERROR_CE, 0))
   8841 			log(LOG_WARNING, "%s: CRC error\n",
   8842 			    device_xname(sc->sc_dev));
   8843 		return true;
   8844 	}
   8845 
   8846 	return false;
   8847 }
   8848 
   8849 static inline bool
   8850 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8851 {
   8852 	struct wm_softc *sc = rxq->rxq_sc;
   8853 
   8854 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8855 		NQRXC_STATUS_DD)) {
   8856 		/* We have processed all of the receive descriptors. */
   8857 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8858 		return false;
   8859 	}
   8860 
   8861 	return true;
   8862 }
   8863 
   8864 static inline bool
   8865 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8866     uint16_t vlantag, struct mbuf *m)
   8867 {
   8868 
   8869 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8870 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8871 		vlan_set_tag(m, le16toh(vlantag));
   8872 	}
   8873 
   8874 	return true;
   8875 }
   8876 
   8877 static inline void
   8878 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8879     uint32_t errors, struct mbuf *m)
   8880 {
   8881 	struct wm_softc *sc = rxq->rxq_sc;
   8882 
   8883 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8884 		if (wm_rxdesc_is_set_status(sc, status,
   8885 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8886 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8887 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8888 			if (wm_rxdesc_is_set_error(sc, errors,
   8889 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8890 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8891 		}
   8892 		if (wm_rxdesc_is_set_status(sc, status,
   8893 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8894 			/*
   8895 			 * Note: we don't know if this was TCP or UDP,
   8896 			 * so we just set both bits, and expect the
   8897 			 * upper layers to deal.
   8898 			 */
   8899 			WM_Q_EVCNT_INCR(rxq, tusum);
   8900 			m->m_pkthdr.csum_flags |=
   8901 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8902 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8903 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8904 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8905 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8906 		}
   8907 	}
   8908 }
   8909 
   8910 /*
   8911  * wm_rxeof:
   8912  *
   8913  *	Helper; handle receive interrupts.
   8914  */
   8915 static bool
   8916 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8917 {
   8918 	struct wm_softc *sc = rxq->rxq_sc;
   8919 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8920 	struct wm_rxsoft *rxs;
   8921 	struct mbuf *m;
   8922 	int i, len;
   8923 	int count = 0;
   8924 	uint32_t status, errors;
   8925 	uint16_t vlantag;
   8926 	bool more = false;
   8927 
   8928 	KASSERT(mutex_owned(rxq->rxq_lock));
   8929 
   8930 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8931 		if (limit-- == 0) {
   8932 			rxq->rxq_ptr = i;
   8933 			more = true;
   8934 			DPRINTF(WM_DEBUG_RX,
   8935 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8936 				device_xname(sc->sc_dev), i));
   8937 			break;
   8938 		}
   8939 
   8940 		rxs = &rxq->rxq_soft[i];
   8941 
   8942 		DPRINTF(WM_DEBUG_RX,
   8943 		    ("%s: RX: checking descriptor %d\n",
   8944 			device_xname(sc->sc_dev), i));
   8945 		wm_cdrxsync(rxq, i,
   8946 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8947 
   8948 		status = wm_rxdesc_get_status(rxq, i);
   8949 		errors = wm_rxdesc_get_errors(rxq, i);
   8950 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8951 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8952 #ifdef WM_DEBUG
   8953 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8954 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8955 #endif
   8956 
   8957 		if (!wm_rxdesc_dd(rxq, i, status)) {
			/*
			 * Update the receive pointer while still holding
			 * rxq_lock, keeping it consistent with the
			 * counters.
			 */
   8962 			rxq->rxq_ptr = i;
   8963 			break;
   8964 		}
   8965 
   8966 		count++;
   8967 		if (__predict_false(rxq->rxq_discard)) {
   8968 			DPRINTF(WM_DEBUG_RX,
   8969 			    ("%s: RX: discarding contents of descriptor %d\n",
   8970 				device_xname(sc->sc_dev), i));
   8971 			wm_init_rxdesc(rxq, i);
   8972 			if (wm_rxdesc_is_eop(rxq, status)) {
   8973 				/* Reset our state. */
   8974 				DPRINTF(WM_DEBUG_RX,
   8975 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8976 					device_xname(sc->sc_dev)));
   8977 				rxq->rxq_discard = 0;
   8978 			}
   8979 			continue;
   8980 		}
   8981 
   8982 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8983 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8984 
   8985 		m = rxs->rxs_mbuf;
   8986 
   8987 		/*
   8988 		 * Add a new receive buffer to the ring, unless of
   8989 		 * course the length is zero. Treat the latter as a
   8990 		 * failed mapping.
   8991 		 */
   8992 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8993 			/*
   8994 			 * Failed, throw away what we've done so
   8995 			 * far, and discard the rest of the packet.
   8996 			 */
   8997 			if_statinc(ifp, if_ierrors);
   8998 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8999 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   9000 			wm_init_rxdesc(rxq, i);
   9001 			if (!wm_rxdesc_is_eop(rxq, status))
   9002 				rxq->rxq_discard = 1;
   9003 			if (rxq->rxq_head != NULL)
   9004 				m_freem(rxq->rxq_head);
   9005 			WM_RXCHAIN_RESET(rxq);
   9006 			DPRINTF(WM_DEBUG_RX,
   9007 			    ("%s: RX: Rx buffer allocation failed, "
   9008 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   9009 				rxq->rxq_discard ? " (discard)" : ""));
   9010 			continue;
   9011 		}
   9012 
   9013 		m->m_len = len;
   9014 		rxq->rxq_len += len;
   9015 		DPRINTF(WM_DEBUG_RX,
   9016 		    ("%s: RX: buffer at %p len %d\n",
   9017 			device_xname(sc->sc_dev), m->m_data, len));
   9018 
   9019 		/* If this is not the end of the packet, keep looking. */
   9020 		if (!wm_rxdesc_is_eop(rxq, status)) {
   9021 			WM_RXCHAIN_LINK(rxq, m);
   9022 			DPRINTF(WM_DEBUG_RX,
   9023 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   9024 				device_xname(sc->sc_dev), rxq->rxq_len));
   9025 			continue;
   9026 		}
   9027 
		/*
		 * Okay, we have the entire packet now. The chip is
		 * configured to include the FCS except on the I350,
		 * I354 and I21[01] (not all chips can be configured to
		 * strip it), so we need to trim it. We may also need to
		 * adjust the length of the previous mbuf in the chain
		 * if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so we don't trim
		 * the FCS there.
		 */
   9038 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   9039 		    && (sc->sc_type != WM_T_I210)
   9040 		    && (sc->sc_type != WM_T_I211)) {
   9041 			if (m->m_len < ETHER_CRC_LEN) {
   9042 				rxq->rxq_tail->m_len
   9043 				    -= (ETHER_CRC_LEN - m->m_len);
   9044 				m->m_len = 0;
   9045 			} else
   9046 				m->m_len -= ETHER_CRC_LEN;
   9047 			len = rxq->rxq_len - ETHER_CRC_LEN;
   9048 		} else
   9049 			len = rxq->rxq_len;
   9050 
   9051 		WM_RXCHAIN_LINK(rxq, m);
   9052 
   9053 		*rxq->rxq_tailp = NULL;
   9054 		m = rxq->rxq_head;
   9055 
   9056 		WM_RXCHAIN_RESET(rxq);
   9057 
   9058 		DPRINTF(WM_DEBUG_RX,
   9059 		    ("%s: RX: have entire packet, len -> %d\n",
   9060 			device_xname(sc->sc_dev), len));
   9061 
   9062 		/* If an error occurred, update stats and drop the packet. */
   9063 		if (wm_rxdesc_has_errors(rxq, errors)) {
   9064 			m_freem(m);
   9065 			continue;
   9066 		}
   9067 
   9068 		/* No errors.  Receive the packet. */
   9069 		m_set_rcvif(m, ifp);
   9070 		m->m_pkthdr.len = len;
		/*
		 * TODO
		 * We should save the RSS hash and RSS type in this mbuf.
		 */
   9075 		DPRINTF(WM_DEBUG_RX,
   9076 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   9077 			device_xname(sc->sc_dev), rsstype, rsshash));
   9078 
   9079 		/*
   9080 		 * If VLANs are enabled, VLAN packets have been unwrapped
   9081 		 * for us.  Associate the tag with the packet.
   9082 		 */
   9083 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   9084 			continue;
   9085 
   9086 		/* Set up checksum info for this packet. */
   9087 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   9088 		/*
   9089 		 * Update the receive pointer holding rxq_lock consistent with
   9090 		 * increment counter.
   9091 		 */
   9092 		rxq->rxq_ptr = i;
   9093 		rxq->rxq_packets++;
   9094 		rxq->rxq_bytes += len;
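		/*
		 * Release rxq_lock around if_percpuq_enqueue() so we
		 * never call into the network stack with the queue lock
		 * held.
		 */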
   9095 		mutex_exit(rxq->rxq_lock);
   9096 
   9097 		/* Pass it on. */
   9098 		if_percpuq_enqueue(sc->sc_ipq, m);
   9099 
   9100 		mutex_enter(rxq->rxq_lock);
   9101 
   9102 		if (rxq->rxq_stopping)
   9103 			break;
   9104 	}
   9105 
   9106 	DPRINTF(WM_DEBUG_RX,
   9107 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   9108 
   9109 	return more;
   9110 }
   9111 
   9112 /*
   9113  * wm_linkintr_gmii:
   9114  *
   9115  *	Helper; handle link interrupts for GMII.
   9116  */
   9117 static void
   9118 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   9119 {
   9120 	device_t dev = sc->sc_dev;
   9121 	uint32_t status, reg;
   9122 	bool link;
   9123 	int rv;
   9124 
   9125 	KASSERT(WM_CORE_LOCKED(sc));
   9126 
   9127 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(dev),
   9128 		__func__));
   9129 
   9130 	if ((icr & ICR_LSC) == 0) {
   9131 		if (icr & ICR_RXSEQ)
   9132 			DPRINTF(WM_DEBUG_LINK,
   9133 			    ("%s: LINK Receive sequence error\n",
   9134 				device_xname(dev)));
   9135 		return;
   9136 	}
   9137 
   9138 	/* Link status changed */
   9139 	status = CSR_READ(sc, WMREG_STATUS);
   9140 	link = status & STATUS_LU;
   9141 	if (link) {
   9142 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9143 			device_xname(dev),
   9144 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9145 	} else {
   9146 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9147 			device_xname(dev)));
   9148 	}
   9149 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   9150 		wm_gig_downshift_workaround_ich8lan(sc);
   9151 
   9152 	if ((sc->sc_type == WM_T_ICH8)
   9153 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   9154 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   9155 	}
   9156 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   9157 		device_xname(dev)));
   9158 	mii_pollstat(&sc->sc_mii);
   9159 	if (sc->sc_type == WM_T_82543) {
   9160 		int miistatus, active;
   9161 
		/*
		 * With the 82543, we need to force the MAC's speed
		 * and duplex settings to match the PHY's speed and
		 * duplex configuration.
		 */
   9167 		miistatus = sc->sc_mii.mii_media_status;
   9168 
   9169 		if (miistatus & IFM_ACTIVE) {
   9170 			active = sc->sc_mii.mii_media_active;
   9171 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9172 			switch (IFM_SUBTYPE(active)) {
   9173 			case IFM_10_T:
   9174 				sc->sc_ctrl |= CTRL_SPEED_10;
   9175 				break;
   9176 			case IFM_100_TX:
   9177 				sc->sc_ctrl |= CTRL_SPEED_100;
   9178 				break;
   9179 			case IFM_1000_T:
   9180 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9181 				break;
   9182 			default:
   9183 				/*
   9184 				 * Fiber?
				 * Should not get here.
   9186 				 */
   9187 				device_printf(dev, "unknown media (%x)\n",
   9188 				    active);
   9189 				break;
   9190 			}
   9191 			if (active & IFM_FDX)
   9192 				sc->sc_ctrl |= CTRL_FD;
   9193 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9194 		}
   9195 	} else if (sc->sc_type == WM_T_PCH) {
   9196 		wm_k1_gig_workaround_hv(sc,
   9197 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9198 	}
   9199 
	/*
	 * When connected at 10Mbps half-duplex, some parts are excessively
	 * aggressive, resulting in many collisions. To avoid this, increase
	 * the IPG and reduce the Rx latency in the PHY.
	 */
   9205 	if ((sc->sc_type >= WM_T_PCH2) && (sc->sc_type <= WM_T_PCH_CNP)
   9206 	    && link) {
   9207 		uint32_t tipg_reg;
   9208 		uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   9209 		bool fdx;
   9210 		uint16_t emi_addr, emi_val;
   9211 
   9212 		tipg_reg = CSR_READ(sc, WMREG_TIPG);
   9213 		tipg_reg &= ~TIPG_IPGT_MASK;
   9214 		fdx = status & STATUS_FD;
   9215 
   9216 		if (!fdx && (speed == STATUS_SPEED_10)) {
   9217 			tipg_reg |= 0xff;
   9218 			/* Reduce Rx latency in analog PHY */
   9219 			emi_val = 0;
   9220 		} else if ((sc->sc_type >= WM_T_PCH_SPT) &&
   9221 		    fdx && speed != STATUS_SPEED_1000) {
   9222 			tipg_reg |= 0xc;
   9223 			emi_val = 1;
   9224 		} else {
			/* Roll back to the default values */
   9226 			tipg_reg |= 0x08;
   9227 			emi_val = 1;
   9228 		}
   9229 
   9230 		CSR_WRITE(sc, WMREG_TIPG, tipg_reg);
   9231 
   9232 		rv = sc->phy.acquire(sc);
   9233 		if (rv)
   9234 			return;
   9235 
   9236 		if (sc->sc_type == WM_T_PCH2)
   9237 			emi_addr = I82579_RX_CONFIG;
   9238 		else
   9239 			emi_addr = I217_RX_CONFIG;
   9240 		rv = wm_write_emi_reg_locked(dev, emi_addr, emi_val);
   9241 
   9242 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9243 			uint16_t phy_reg;
   9244 
   9245 			sc->phy.readreg_locked(dev, 2,
   9246 			    I217_PLL_CLOCK_GATE_REG, &phy_reg);
   9247 			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
   9248 			if (speed == STATUS_SPEED_100
   9249 			    || speed == STATUS_SPEED_10)
   9250 				phy_reg |= 0x3e8;
   9251 			else
   9252 				phy_reg |= 0xfa;
   9253 			sc->phy.writereg_locked(dev, 2,
   9254 			    I217_PLL_CLOCK_GATE_REG, phy_reg);
   9255 
   9256 			if (speed == STATUS_SPEED_1000) {
   9257 				sc->phy.readreg_locked(dev, 2,
   9258 				    HV_PM_CTRL, &phy_reg);
   9259 
   9260 				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
   9261 
   9262 				sc->phy.writereg_locked(dev, 2,
   9263 				    HV_PM_CTRL, phy_reg);
   9264 			}
   9265 		}
   9266 		sc->phy.release(sc);
   9267 
   9268 		if (rv)
   9269 			return;
   9270 
   9271 		if (sc->sc_type >= WM_T_PCH_SPT) {
   9272 			uint16_t data, ptr_gap;
   9273 
   9274 			if (speed == STATUS_SPEED_1000) {
   9275 				rv = sc->phy.acquire(sc);
   9276 				if (rv)
   9277 					return;
   9278 
   9279 				rv = sc->phy.readreg_locked(dev, 2,
   9280 				    I219_UNKNOWN1, &data);
   9281 				if (rv) {
   9282 					sc->phy.release(sc);
   9283 					return;
   9284 				}
   9285 
   9286 				ptr_gap = (data & (0x3ff << 2)) >> 2;
   9287 				if (ptr_gap < 0x18) {
   9288 					data &= ~(0x3ff << 2);
   9289 					data |= (0x18 << 2);
   9290 					rv = sc->phy.writereg_locked(dev,
   9291 					    2, I219_UNKNOWN1, data);
   9292 				}
   9293 				sc->phy.release(sc);
   9294 				if (rv)
   9295 					return;
   9296 			} else {
   9297 				rv = sc->phy.acquire(sc);
   9298 				if (rv)
   9299 					return;
   9300 
   9301 				rv = sc->phy.writereg_locked(dev, 2,
   9302 				    I219_UNKNOWN1, 0xc023);
   9303 				sc->phy.release(sc);
   9304 				if (rv)
   9305 					return;
   9306 
   9307 			}
   9308 		}
   9309 	}
   9310 
	/*
	 * Work around the I217 packet-loss issue: ensure that the
	 * FEXTNVM4 beacon duration is set correctly on power-up.
	 * Set the beacon duration for the I217 to 8 usec.
	 */
   9317 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9318 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9319 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9320 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9321 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9322 	}
   9323 
	/* Work around the I218 hang issue */
   9325 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9326 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9327 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9328 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9329 		wm_k1_workaround_lpt_lp(sc, link);
   9330 
   9331 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9332 		/*
   9333 		 * Set platform power management values for Latency
   9334 		 * Tolerance Reporting (LTR)
   9335 		 */
   9336 		wm_platform_pm_pch_lpt(sc,
   9337 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9338 	}
   9339 
   9340 	/* Clear link partner's EEE ability */
   9341 	sc->eee_lp_ability = 0;
   9342 
   9343 	/* FEXTNVM6 K1-off workaround */
   9344 	if (sc->sc_type == WM_T_PCH_SPT) {
   9345 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9346 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9347 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9348 		else
   9349 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9350 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9351 	}
   9352 
   9353 	if (!link)
   9354 		return;
   9355 
   9356 	switch (sc->sc_type) {
   9357 	case WM_T_PCH2:
   9358 		wm_k1_workaround_lv(sc);
   9359 		/* FALLTHROUGH */
   9360 	case WM_T_PCH:
   9361 		if (sc->sc_phytype == WMPHY_82578)
   9362 			wm_link_stall_workaround_hv(sc);
   9363 		break;
   9364 	default:
   9365 		break;
   9366 	}
   9367 
   9368 	/* Enable/Disable EEE after link up */
   9369 	if (sc->sc_phytype > WMPHY_82579)
   9370 		wm_set_eee_pchlan(sc);
   9371 }
   9372 
   9373 /*
   9374  * wm_linkintr_tbi:
   9375  *
   9376  *	Helper; handle link interrupts for TBI mode.
   9377  */
   9378 static void
   9379 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9380 {
   9381 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9382 	uint32_t status;
   9383 
   9384 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9385 		__func__));
   9386 
   9387 	status = CSR_READ(sc, WMREG_STATUS);
   9388 	if (icr & ICR_LSC) {
   9389 		wm_check_for_link(sc);
   9390 		if (status & STATUS_LU) {
   9391 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9392 				device_xname(sc->sc_dev),
   9393 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9394 			/*
   9395 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9396 			 * so we should update sc->sc_ctrl
   9397 			 */
   9398 
   9399 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9400 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9401 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9402 			if (status & STATUS_FD)
   9403 				sc->sc_tctl |=
   9404 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9405 			else
   9406 				sc->sc_tctl |=
   9407 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9408 			if (sc->sc_ctrl & CTRL_TFCE)
   9409 				sc->sc_fcrtl |= FCRTL_XONE;
   9410 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9411 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9412 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9413 			sc->sc_tbi_linkup = 1;
   9414 			if_link_state_change(ifp, LINK_STATE_UP);
   9415 		} else {
   9416 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9417 				device_xname(sc->sc_dev)));
   9418 			sc->sc_tbi_linkup = 0;
   9419 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9420 		}
   9421 		/* Update LED */
   9422 		wm_tbi_serdes_set_linkled(sc);
   9423 	} else if (icr & ICR_RXSEQ)
   9424 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9425 			device_xname(sc->sc_dev)));
   9426 }
   9427 
   9428 /*
   9429  * wm_linkintr_serdes:
   9430  *
 *	Helper; handle link interrupts for SERDES mode.
   9432  */
   9433 static void
   9434 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9435 {
   9436 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9437 	struct mii_data *mii = &sc->sc_mii;
   9438 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9439 	uint32_t pcs_adv, pcs_lpab, reg;
   9440 
   9441 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9442 		__func__));
   9443 
   9444 	if (icr & ICR_LSC) {
   9445 		/* Check PCS */
   9446 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9447 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9448 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9449 				device_xname(sc->sc_dev)));
   9450 			mii->mii_media_status |= IFM_ACTIVE;
   9451 			sc->sc_tbi_linkup = 1;
   9452 			if_link_state_change(ifp, LINK_STATE_UP);
   9453 		} else {
   9454 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9455 				device_xname(sc->sc_dev)));
   9456 			mii->mii_media_status |= IFM_NONE;
   9457 			sc->sc_tbi_linkup = 0;
   9458 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9459 			wm_tbi_serdes_set_linkled(sc);
   9460 			return;
   9461 		}
   9462 		mii->mii_media_active |= IFM_1000_SX;
   9463 		if ((reg & PCS_LSTS_FDX) != 0)
   9464 			mii->mii_media_active |= IFM_FDX;
   9465 		else
   9466 			mii->mii_media_active |= IFM_HDX;
   9467 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9468 			/* Check flow */
   9469 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9470 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9471 				DPRINTF(WM_DEBUG_LINK,
   9472 				    ("XXX LINKOK but not ACOMP\n"));
   9473 				return;
   9474 			}
   9475 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9476 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9477 			DPRINTF(WM_DEBUG_LINK,
   9478 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
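			/*
			 * Resolve pause per the IEEE 802.3 Annex 28B
			 * table: symmetric pause on both sides enables
			 * Tx and Rx pause; if we advertise asymmetric
			 * only and the partner advertises both, only Tx
			 * pause is enabled; if we advertise both and the
			 * partner asymmetric only, only Rx pause is
			 * enabled.
			 */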
   9479 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9480 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9481 				mii->mii_media_active |= IFM_FLOW
   9482 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9483 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9484 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9485 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9486 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9487 				mii->mii_media_active |= IFM_FLOW
   9488 				    | IFM_ETH_TXPAUSE;
   9489 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9490 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9491 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9492 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9493 				mii->mii_media_active |= IFM_FLOW
   9494 				    | IFM_ETH_RXPAUSE;
   9495 		}
   9496 		/* Update LED */
   9497 		wm_tbi_serdes_set_linkled(sc);
   9498 	} else
   9499 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9500 		    device_xname(sc->sc_dev)));
   9501 }
   9502 
   9503 /*
   9504  * wm_linkintr:
   9505  *
   9506  *	Helper; handle link interrupts.
   9507  */
   9508 static void
   9509 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9510 {
   9511 
   9512 	KASSERT(WM_CORE_LOCKED(sc));
   9513 
   9514 	if (sc->sc_flags & WM_F_HAS_MII)
   9515 		wm_linkintr_gmii(sc, icr);
   9516 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9517 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9518 		wm_linkintr_serdes(sc, icr);
   9519 	else
   9520 		wm_linkintr_tbi(sc, icr);
   9521 }
   9522 
   9523 
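/*
 * wm_sched_handle_queue:
 *
 *	Defer Tx/Rx processing for a queue to either the per-device
 *	workqueue (thread context, preemptible) or a softint (lower
 *	dispatch latency), depending on wmq_txrx_use_workqueue. Both
 *	paths are intended to run wm_handle_queue(); the workqueue path
 *	goes via wm_handle_queue_work().
 */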
   9524 static inline void
   9525 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
   9526 {
   9527 
   9528 	if (wmq->wmq_txrx_use_workqueue)
   9529 		workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie, curcpu());
   9530 	else
   9531 		softint_schedule(wmq->wmq_si);
   9532 }
   9533 
   9534 /*
   9535  * wm_intr_legacy:
   9536  *
   9537  *	Interrupt service routine for INTx and MSI.
   9538  */
   9539 static int
   9540 wm_intr_legacy(void *arg)
   9541 {
   9542 	struct wm_softc *sc = arg;
   9543 	struct wm_queue *wmq = &sc->sc_queue[0];
   9544 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9545 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9546 	uint32_t icr, rndval = 0;
   9547 	int handled = 0;
   9548 
   9549 	while (1 /* CONSTCOND */) {
   9550 		icr = CSR_READ(sc, WMREG_ICR);
   9551 		if ((icr & sc->sc_icr) == 0)
   9552 			break;
   9553 		if (handled == 0)
   9554 			DPRINTF(WM_DEBUG_TX,
   9555 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   9556 		if (rndval == 0)
   9557 			rndval = icr;
   9558 
   9559 		mutex_enter(rxq->rxq_lock);
   9560 
   9561 		if (rxq->rxq_stopping) {
   9562 			mutex_exit(rxq->rxq_lock);
   9563 			break;
   9564 		}
   9565 
   9566 		handled = 1;
   9567 
   9568 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9569 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9570 			DPRINTF(WM_DEBUG_RX,
   9571 			    ("%s: RX: got Rx intr 0x%08x\n",
   9572 				device_xname(sc->sc_dev),
   9573 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9574 			WM_Q_EVCNT_INCR(rxq, intr);
   9575 		}
   9576 #endif
   9577 		/*
   9578 		 * wm_rxeof() does *not* call upper layer functions directly,
    9579 		 * as if_percpuq_enqueue() just calls softint_schedule(),
    9580 		 * so we can safely call wm_rxeof() in interrupt context.
   9581 		 */
   9582 		wm_rxeof(rxq, UINT_MAX);
   9583 		/* Fill lower bits with RX index. See below for the upper. */
   9584 		rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9585 
   9586 		mutex_exit(rxq->rxq_lock);
   9587 		mutex_enter(txq->txq_lock);
   9588 
   9589 		if (txq->txq_stopping) {
   9590 			mutex_exit(txq->txq_lock);
   9591 			break;
   9592 		}
   9593 
   9594 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9595 		if (icr & ICR_TXDW) {
   9596 			DPRINTF(WM_DEBUG_TX,
   9597 			    ("%s: TX: got TXDW interrupt\n",
   9598 				device_xname(sc->sc_dev)));
   9599 			WM_Q_EVCNT_INCR(txq, txdw);
   9600 		}
   9601 #endif
   9602 		wm_txeof(txq, UINT_MAX);
   9603 		/* Fill upper bits with TX index. See above for the lower. */
    9604 		rndval |= txq->txq_next * WM_NRXDESC;
   9605 
   9606 		mutex_exit(txq->txq_lock);
   9607 		WM_CORE_LOCK(sc);
   9608 
   9609 		if (sc->sc_core_stopping) {
   9610 			WM_CORE_UNLOCK(sc);
   9611 			break;
   9612 		}
   9613 
   9614 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9615 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9616 			wm_linkintr(sc, icr);
   9617 		}
   9618 		if ((icr & ICR_GPI(0)) != 0)
   9619 			device_printf(sc->sc_dev, "got module interrupt\n");
   9620 
   9621 		WM_CORE_UNLOCK(sc);
   9622 
   9623 		if (icr & ICR_RXO) {
   9624 #if defined(WM_DEBUG)
   9625 			log(LOG_WARNING, "%s: Receive overrun\n",
   9626 			    device_xname(sc->sc_dev));
   9627 #endif /* defined(WM_DEBUG) */
   9628 		}
   9629 	}
   9630 
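	/*
	 * Feed one 32-bit entropy sample per interrupt: the first ICR value
	 * read, with the Rx descriptor index folded into the low bits and
	 * the Tx descriptor index into the bits above them (see the loop).
	 */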
   9631 	rnd_add_uint32(&sc->sc_queue[0].rnd_source, rndval);
   9632 
   9633 	if (handled) {
   9634 		/* Try to get more packets going. */
   9635 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9636 		wm_sched_handle_queue(sc, wmq);
   9637 	}
   9638 
   9639 	return handled;
   9640 }
   9641 
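/*
 * wm_txrxintr_disable:
 *
 *	Mask this queue's Tx/Rx interrupts. The mask register and its bit
 *	layout differ by MAC type: the 82574 uses per-queue bits in IMC,
 *	the 82575 uses EITR-style queue bits in EIMC, and newer MACs use
 *	one EIMC bit per MSI-X vector.
 */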
   9642 static inline void
   9643 wm_txrxintr_disable(struct wm_queue *wmq)
   9644 {
   9645 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9646 
   9647 	if (sc->sc_type == WM_T_82574)
   9648 		CSR_WRITE(sc, WMREG_IMC,
   9649 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9650 	else if (sc->sc_type == WM_T_82575)
   9651 		CSR_WRITE(sc, WMREG_EIMC,
   9652 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9653 	else
   9654 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9655 }
   9656 
   9657 static inline void
   9658 wm_txrxintr_enable(struct wm_queue *wmq)
   9659 {
   9660 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9661 
   9662 	wm_itrs_calculate(sc, wmq);
   9663 
   9664 	/*
    9665 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
    9666 	 * here. There is no need to care which of RXQ(0) and RXQ(1) enables
    9667 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
    9668 	 * its wm_handle_queue(wmq) is running.
   9669 	 */
   9670 	if (sc->sc_type == WM_T_82574)
   9671 		CSR_WRITE(sc, WMREG_IMS,
   9672 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9673 	else if (sc->sc_type == WM_T_82575)
   9674 		CSR_WRITE(sc, WMREG_EIMS,
   9675 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9676 	else
   9677 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9678 }
   9679 
   9680 static int
   9681 wm_txrxintr_msix(void *arg)
   9682 {
   9683 	struct wm_queue *wmq = arg;
   9684 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9685 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9686 	struct wm_softc *sc = txq->txq_sc;
   9687 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9688 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9689 	uint32_t rndval = 0;
   9690 	bool txmore;
   9691 	bool rxmore;
   9692 
   9693 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9694 
   9695 	DPRINTF(WM_DEBUG_TX,
   9696 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9697 
   9698 	wm_txrxintr_disable(wmq);
   9699 
   9700 	mutex_enter(txq->txq_lock);
   9701 
   9702 	if (txq->txq_stopping) {
   9703 		mutex_exit(txq->txq_lock);
   9704 		return 0;
   9705 	}
   9706 
   9707 	WM_Q_EVCNT_INCR(txq, txdw);
   9708 	txmore = wm_txeof(txq, txlimit);
   9709 	/* Fill upper bits with TX index. See below for the lower. */
   9710 	rndval = txq->txq_next * WM_NRXDESC;
    9711 	/* wm_deferred_start() is done in wm_handle_queue(). */
   9712 	mutex_exit(txq->txq_lock);
   9713 
   9714 	DPRINTF(WM_DEBUG_RX,
   9715 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9716 	mutex_enter(rxq->rxq_lock);
   9717 
   9718 	if (rxq->rxq_stopping) {
   9719 		mutex_exit(rxq->rxq_lock);
   9720 		return 0;
   9721 	}
   9722 
   9723 	WM_Q_EVCNT_INCR(rxq, intr);
   9724 	rxmore = wm_rxeof(rxq, rxlimit);
   9725 
   9726 	/* Fill lower bits with RX index. See above for the upper. */
   9727 	rndval |= rxq->rxq_ptr & WM_NRXDESC_MASK;
   9728 	mutex_exit(rxq->rxq_lock);
   9729 
   9730 	wm_itrs_writereg(sc, wmq);
   9731 
   9732 	/*
    9733 	 * This function is called in hardware interrupt context and each
    9734 	 * MSI-X vector is handled on a single CPU, so no lock is required.
   9735 	 */
   9736 	if (rndval != 0)
   9737 		rnd_add_uint32(&sc->sc_queue[wmq->wmq_id].rnd_source, rndval);
   9738 
   9739 	if (txmore || rxmore) {
   9740 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9741 		wm_sched_handle_queue(sc, wmq);
   9742 	} else
   9743 		wm_txrxintr_enable(wmq);
   9744 
   9745 	return 1;
   9746 }
   9747 
   9748 static void
   9749 wm_handle_queue(void *arg)
   9750 {
   9751 	struct wm_queue *wmq = arg;
   9752 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9753 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9754 	struct wm_softc *sc = txq->txq_sc;
   9755 	u_int txlimit = sc->sc_tx_process_limit;
   9756 	u_int rxlimit = sc->sc_rx_process_limit;
   9757 	bool txmore;
   9758 	bool rxmore;
   9759 
   9760 	mutex_enter(txq->txq_lock);
   9761 	if (txq->txq_stopping) {
   9762 		mutex_exit(txq->txq_lock);
   9763 		return;
   9764 	}
   9765 	txmore = wm_txeof(txq, txlimit);
   9766 	wm_deferred_start_locked(txq);
   9767 	mutex_exit(txq->txq_lock);
   9768 
   9769 	mutex_enter(rxq->rxq_lock);
   9770 	if (rxq->rxq_stopping) {
   9771 		mutex_exit(rxq->rxq_lock);
   9772 		return;
   9773 	}
   9774 	WM_Q_EVCNT_INCR(rxq, defer);
   9775 	rxmore = wm_rxeof(rxq, rxlimit);
   9776 	mutex_exit(rxq->rxq_lock);
   9777 
   9778 	if (txmore || rxmore) {
   9779 		wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
   9780 		wm_sched_handle_queue(sc, wmq);
   9781 	} else
   9782 		wm_txrxintr_enable(wmq);
   9783 }
   9784 
   9785 static void
   9786 wm_handle_queue_work(struct work *wk, void *context)
   9787 {
   9788 	struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
   9789 
   9790 	/*
    9791 	 * An "enqueued" flag is not required here (see wm_txrxintr_enable()).
   9792 	 */
   9793 	wm_handle_queue(wmq);
   9794 }
   9795 
   9796 /*
   9797  * wm_linkintr_msix:
   9798  *
   9799  *	Interrupt service routine for link status change for MSI-X.
   9800  */
   9801 static int
   9802 wm_linkintr_msix(void *arg)
   9803 {
   9804 	struct wm_softc *sc = arg;
   9805 	uint32_t reg;
   9806 	bool has_rxo;
   9807 
   9808 	reg = CSR_READ(sc, WMREG_ICR);
   9809 	WM_CORE_LOCK(sc);
   9810 	DPRINTF(WM_DEBUG_LINK,
   9811 	    ("%s: LINK: got link intr. ICR = %08x\n",
   9812 		device_xname(sc->sc_dev), reg));
   9813 
   9814 	if (sc->sc_core_stopping)
   9815 		goto out;
   9816 
   9817 	if ((reg & ICR_LSC) != 0) {
   9818 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9819 		wm_linkintr(sc, ICR_LSC);
   9820 	}
   9821 	if ((reg & ICR_GPI(0)) != 0)
   9822 		device_printf(sc->sc_dev, "got module interrupt\n");
   9823 
   9824 	/*
   9825 	 * XXX 82574 MSI-X mode workaround
   9826 	 *
    9827 	 * In 82574 MSI-X mode, the receive overrun (RXO) interrupt is raised
    9828 	 * on the ICR_OTHER MSI-X vector and triggers neither the ICR_RXQ(0)
    9829 	 * nor the ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9830 	 * interrupts by writing WMREG_ICS to process receive packets.
   9831 	 */
   9832 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9833 #if defined(WM_DEBUG)
   9834 		log(LOG_WARNING, "%s: Receive overrun\n",
   9835 		    device_xname(sc->sc_dev));
   9836 #endif /* defined(WM_DEBUG) */
   9837 
   9838 		has_rxo = true;
   9839 		/*
    9840 		 * The RXO interrupt fires at a very high rate when receive
    9841 		 * traffic is heavy. We use polling mode for ICR_OTHER as for Tx/Rx
   9842 		 * interrupts. ICR_OTHER will be enabled at the end of
   9843 		 * wm_txrxintr_msix() which is kicked by both ICR_RXQ(0) and
   9844 		 * ICR_RXQ(1) interrupts.
   9845 		 */
   9846 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9847 
   9848 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9849 	}
   9850 
   9851 
   9852 
   9853 out:
   9854 	WM_CORE_UNLOCK(sc);
   9855 
   9856 	if (sc->sc_type == WM_T_82574) {
   9857 		if (!has_rxo)
   9858 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9859 		else
   9860 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9861 	} else if (sc->sc_type == WM_T_82575)
   9862 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9863 	else
   9864 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9865 
   9866 	return 1;
   9867 }
   9868 
   9869 /*
   9870  * Media related.
   9871  * GMII, SGMII, TBI (and SERDES)
   9872  */
   9873 
   9874 /* Common */
   9875 
   9876 /*
   9877  * wm_tbi_serdes_set_linkled:
   9878  *
   9879  *	Update the link LED on TBI and SERDES devices.
   9880  */
   9881 static void
   9882 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9883 {
   9884 
   9885 	if (sc->sc_tbi_linkup)
   9886 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9887 	else
   9888 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9889 
   9890 	/* 82540 or newer devices are active low */
   9891 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9892 
   9893 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9894 }
   9895 
   9896 /* GMII related */
   9897 
   9898 /*
   9899  * wm_gmii_reset:
   9900  *
   9901  *	Reset the PHY.
   9902  */
   9903 static void
   9904 wm_gmii_reset(struct wm_softc *sc)
   9905 {
   9906 	uint32_t reg;
   9907 	int rv;
   9908 
   9909 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9910 		device_xname(sc->sc_dev), __func__));
   9911 
   9912 	rv = sc->phy.acquire(sc);
   9913 	if (rv != 0) {
   9914 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9915 		    __func__);
   9916 		return;
   9917 	}
   9918 
   9919 	switch (sc->sc_type) {
   9920 	case WM_T_82542_2_0:
   9921 	case WM_T_82542_2_1:
   9922 		/* null */
   9923 		break;
   9924 	case WM_T_82543:
   9925 		/*
   9926 		 * With 82543, we need to force speed and duplex on the MAC
   9927 		 * equal to what the PHY speed and duplex configuration is.
    9928 		 * In addition, we need to toggle the PHY's hardware reset
    9929 		 * line to take it out of reset.
   9930 		 */
   9931 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9932 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9933 
   9934 		/* The PHY reset pin is active-low. */
   9935 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9936 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9937 		    CTRL_EXT_SWDPIN(4));
   9938 		reg |= CTRL_EXT_SWDPIO(4);
   9939 
   9940 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9941 		CSR_WRITE_FLUSH(sc);
   9942 		delay(10*1000);
   9943 
   9944 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9945 		CSR_WRITE_FLUSH(sc);
   9946 		delay(150);
   9947 #if 0
   9948 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9949 #endif
   9950 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9951 		break;
   9952 	case WM_T_82544:	/* Reset 10000us */
   9953 	case WM_T_82540:
   9954 	case WM_T_82545:
   9955 	case WM_T_82545_3:
   9956 	case WM_T_82546:
   9957 	case WM_T_82546_3:
   9958 	case WM_T_82541:
   9959 	case WM_T_82541_2:
   9960 	case WM_T_82547:
   9961 	case WM_T_82547_2:
   9962 	case WM_T_82571:	/* Reset 100us */
   9963 	case WM_T_82572:
   9964 	case WM_T_82573:
   9965 	case WM_T_82574:
   9966 	case WM_T_82575:
   9967 	case WM_T_82576:
   9968 	case WM_T_82580:
   9969 	case WM_T_I350:
   9970 	case WM_T_I354:
   9971 	case WM_T_I210:
   9972 	case WM_T_I211:
   9973 	case WM_T_82583:
   9974 	case WM_T_80003:
   9975 		/* Generic reset */
   9976 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9977 		CSR_WRITE_FLUSH(sc);
   9978 		delay(20000);
   9979 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9980 		CSR_WRITE_FLUSH(sc);
   9981 		delay(20000);
   9982 
   9983 		if ((sc->sc_type == WM_T_82541)
   9984 		    || (sc->sc_type == WM_T_82541_2)
   9985 		    || (sc->sc_type == WM_T_82547)
   9986 		    || (sc->sc_type == WM_T_82547_2)) {
    9987 			/* Workarounds for IGP are done in igp_reset() */
   9988 			/* XXX add code to set LED after phy reset */
   9989 		}
   9990 		break;
   9991 	case WM_T_ICH8:
   9992 	case WM_T_ICH9:
   9993 	case WM_T_ICH10:
   9994 	case WM_T_PCH:
   9995 	case WM_T_PCH2:
   9996 	case WM_T_PCH_LPT:
   9997 	case WM_T_PCH_SPT:
   9998 	case WM_T_PCH_CNP:
   9999 		/* Generic reset */
   10000 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   10001 		CSR_WRITE_FLUSH(sc);
   10002 		delay(100);
   10003 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10004 		CSR_WRITE_FLUSH(sc);
   10005 		delay(150);
   10006 		break;
   10007 	default:
   10008 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   10009 		    __func__);
   10010 		break;
   10011 	}
   10012 
   10013 	sc->phy.release(sc);
   10014 
   10015 	/* get_cfg_done */
   10016 	wm_get_cfg_done(sc);
   10017 
   10018 	/* Extra setup */
   10019 	switch (sc->sc_type) {
   10020 	case WM_T_82542_2_0:
   10021 	case WM_T_82542_2_1:
   10022 	case WM_T_82543:
   10023 	case WM_T_82544:
   10024 	case WM_T_82540:
   10025 	case WM_T_82545:
   10026 	case WM_T_82545_3:
   10027 	case WM_T_82546:
   10028 	case WM_T_82546_3:
   10029 	case WM_T_82541_2:
   10030 	case WM_T_82547_2:
   10031 	case WM_T_82571:
   10032 	case WM_T_82572:
   10033 	case WM_T_82573:
   10034 	case WM_T_82574:
   10035 	case WM_T_82583:
   10036 	case WM_T_82575:
   10037 	case WM_T_82576:
   10038 	case WM_T_82580:
   10039 	case WM_T_I350:
   10040 	case WM_T_I354:
   10041 	case WM_T_I210:
   10042 	case WM_T_I211:
   10043 	case WM_T_80003:
   10044 		/* Null */
   10045 		break;
   10046 	case WM_T_82541:
   10047 	case WM_T_82547:
    10048 		/* XXX Configure the activity LED after PHY reset */
   10049 		break;
   10050 	case WM_T_ICH8:
   10051 	case WM_T_ICH9:
   10052 	case WM_T_ICH10:
   10053 	case WM_T_PCH:
   10054 	case WM_T_PCH2:
   10055 	case WM_T_PCH_LPT:
   10056 	case WM_T_PCH_SPT:
   10057 	case WM_T_PCH_CNP:
   10058 		wm_phy_post_reset(sc);
   10059 		break;
   10060 	default:
   10061 		panic("%s: unknown type\n", __func__);
   10062 		break;
   10063 	}
   10064 }
   10065 
   10066 /*
   10067  * Setup sc_phytype and mii_{read|write}reg.
   10068  *
    10069  *  To identify the PHY type, the correct read/write function must be
    10070  * selected. To select it, the PCI ID or MAC type must be used, since
    10071  * PHY registers cannot be accessed yet.
    10072  *
    10073  *  On the first call of this function, the PHY ID is not known yet, so
    10074  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    10075  * so the result might be incorrect.
    10076  *
    10077  *  On the second call, the PHY OUI and model are used to identify the
    10078  * PHY type. This might still not be perfect because some entries may be
    10079  * missing from the comparison, but it is better than the first call.
    10080  *
    10081  *  If the newly detected result differs from the previous assumption,
    10082  * a diagnostic message is printed.
   10083  */
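/*
 * For example (a sketch of the call protocol): the first call typically
 * happens before the PHY is probed, with phy_oui and phy_model both 0;
 * the second call, from wm_gmii_mediainit() below, passes the attached
 * child's mii_mpd_oui and mii_mpd_model.
 */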
   10084 static void
   10085 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   10086     uint16_t phy_model)
   10087 {
   10088 	device_t dev = sc->sc_dev;
   10089 	struct mii_data *mii = &sc->sc_mii;
   10090 	uint16_t new_phytype = WMPHY_UNKNOWN;
   10091 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   10092 	mii_readreg_t new_readreg;
   10093 	mii_writereg_t new_writereg;
   10094 	bool dodiag = true;
   10095 
   10096 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10097 		device_xname(sc->sc_dev), __func__));
   10098 
   10099 	/*
    10100 	 * A 1000BASE-T SFP uses SGMII, and the first assumed PHY type is
    10101 	 * always incorrect, so don't print diag output on the second call.
   10102 	 */
   10103 	if ((sc->sc_sfptype != 0) && (phy_oui == 0) && (phy_model == 0))
   10104 		dodiag = false;
   10105 
   10106 	if (mii->mii_readreg == NULL) {
   10107 		/*
   10108 		 *  This is the first call of this function. For ICH and PCH
   10109 		 * variants, it's difficult to determine the PHY access method
   10110 		 * by sc_type, so use the PCI product ID for some devices.
   10111 		 */
   10112 
   10113 		switch (sc->sc_pcidevid) {
   10114 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   10115 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   10116 			/* 82577 */
   10117 			new_phytype = WMPHY_82577;
   10118 			break;
   10119 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   10120 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   10121 			/* 82578 */
   10122 			new_phytype = WMPHY_82578;
   10123 			break;
   10124 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   10125 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   10126 			/* 82579 */
   10127 			new_phytype = WMPHY_82579;
   10128 			break;
   10129 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   10130 		case PCI_PRODUCT_INTEL_82801I_BM:
   10131 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   10132 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   10133 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   10134 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   10135 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   10136 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   10137 			/* ICH8, 9, 10 with 82567 */
   10138 			new_phytype = WMPHY_BM;
   10139 			break;
   10140 		default:
   10141 			break;
   10142 		}
   10143 	} else {
   10144 		/* It's not the first call. Use PHY OUI and model */
   10145 		switch (phy_oui) {
   10146 		case MII_OUI_ATTANSIC: /* XXX ??? */
   10147 			switch (phy_model) {
   10148 			case 0x0004: /* XXX */
   10149 				new_phytype = WMPHY_82578;
   10150 				break;
   10151 			default:
   10152 				break;
   10153 			}
   10154 			break;
   10155 		case MII_OUI_xxMARVELL:
   10156 			switch (phy_model) {
   10157 			case MII_MODEL_xxMARVELL_I210:
   10158 				new_phytype = WMPHY_I210;
   10159 				break;
   10160 			case MII_MODEL_xxMARVELL_E1011:
   10161 			case MII_MODEL_xxMARVELL_E1000_3:
   10162 			case MII_MODEL_xxMARVELL_E1000_5:
   10163 			case MII_MODEL_xxMARVELL_E1112:
   10164 				new_phytype = WMPHY_M88;
   10165 				break;
   10166 			case MII_MODEL_xxMARVELL_E1149:
   10167 				new_phytype = WMPHY_BM;
   10168 				break;
   10169 			case MII_MODEL_xxMARVELL_E1111:
   10170 			case MII_MODEL_xxMARVELL_I347:
   10171 			case MII_MODEL_xxMARVELL_E1512:
   10172 			case MII_MODEL_xxMARVELL_E1340M:
   10173 			case MII_MODEL_xxMARVELL_E1543:
   10174 				new_phytype = WMPHY_M88;
   10175 				break;
   10176 			case MII_MODEL_xxMARVELL_I82563:
   10177 				new_phytype = WMPHY_GG82563;
   10178 				break;
   10179 			default:
   10180 				break;
   10181 			}
   10182 			break;
   10183 		case MII_OUI_INTEL:
   10184 			switch (phy_model) {
   10185 			case MII_MODEL_INTEL_I82577:
   10186 				new_phytype = WMPHY_82577;
   10187 				break;
   10188 			case MII_MODEL_INTEL_I82579:
   10189 				new_phytype = WMPHY_82579;
   10190 				break;
   10191 			case MII_MODEL_INTEL_I217:
   10192 				new_phytype = WMPHY_I217;
   10193 				break;
   10194 			case MII_MODEL_INTEL_I82580:
   10195 			case MII_MODEL_INTEL_I350:
   10196 				new_phytype = WMPHY_82580;
   10197 				break;
   10198 			default:
   10199 				break;
   10200 			}
   10201 			break;
   10202 		case MII_OUI_yyINTEL:
   10203 			switch (phy_model) {
   10204 			case MII_MODEL_yyINTEL_I82562G:
   10205 			case MII_MODEL_yyINTEL_I82562EM:
   10206 			case MII_MODEL_yyINTEL_I82562ET:
   10207 				new_phytype = WMPHY_IFE;
   10208 				break;
   10209 			case MII_MODEL_yyINTEL_IGP01E1000:
   10210 				new_phytype = WMPHY_IGP;
   10211 				break;
   10212 			case MII_MODEL_yyINTEL_I82566:
   10213 				new_phytype = WMPHY_IGP_3;
   10214 				break;
   10215 			default:
   10216 				break;
   10217 			}
   10218 			break;
   10219 		default:
   10220 			break;
   10221 		}
   10222 
   10223 		if (dodiag) {
   10224 			if (new_phytype == WMPHY_UNKNOWN)
   10225 				aprint_verbose_dev(dev,
   10226 				    "%s: Unknown PHY model. OUI=%06x, "
   10227 				    "model=%04x\n", __func__, phy_oui,
   10228 				    phy_model);
   10229 
   10230 			if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10231 			    && (sc->sc_phytype != new_phytype)) {
   10232 				aprint_error_dev(dev, "Previously assumed PHY "
    10233 				    "type(%u) was incorrect. PHY type from PHY "
   10234 				    "ID = %u\n", sc->sc_phytype, new_phytype);
   10235 			}
   10236 		}
   10237 	}
   10238 
   10239 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   10240 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   10241 		/* SGMII */
   10242 		new_readreg = wm_sgmii_readreg;
   10243 		new_writereg = wm_sgmii_writereg;
   10244 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   10245 		/* BM2 (phyaddr == 1) */
   10246 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10247 		    && (new_phytype != WMPHY_BM)
   10248 		    && (new_phytype != WMPHY_UNKNOWN))
   10249 			doubt_phytype = new_phytype;
   10250 		new_phytype = WMPHY_BM;
   10251 		new_readreg = wm_gmii_bm_readreg;
   10252 		new_writereg = wm_gmii_bm_writereg;
   10253 	} else if (sc->sc_type >= WM_T_PCH) {
   10254 		/* All PCH* use _hv_ */
   10255 		new_readreg = wm_gmii_hv_readreg;
   10256 		new_writereg = wm_gmii_hv_writereg;
   10257 	} else if (sc->sc_type >= WM_T_ICH8) {
   10258 		/* non-82567 ICH8, 9 and 10 */
   10259 		new_readreg = wm_gmii_i82544_readreg;
   10260 		new_writereg = wm_gmii_i82544_writereg;
   10261 	} else if (sc->sc_type >= WM_T_80003) {
   10262 		/* 80003 */
   10263 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10264 		    && (new_phytype != WMPHY_GG82563)
   10265 		    && (new_phytype != WMPHY_UNKNOWN))
   10266 			doubt_phytype = new_phytype;
   10267 		new_phytype = WMPHY_GG82563;
   10268 		new_readreg = wm_gmii_i80003_readreg;
   10269 		new_writereg = wm_gmii_i80003_writereg;
   10270 	} else if (sc->sc_type >= WM_T_I210) {
   10271 		/* I210 and I211 */
   10272 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10273 		    && (new_phytype != WMPHY_I210)
   10274 		    && (new_phytype != WMPHY_UNKNOWN))
   10275 			doubt_phytype = new_phytype;
   10276 		new_phytype = WMPHY_I210;
   10277 		new_readreg = wm_gmii_gs40g_readreg;
   10278 		new_writereg = wm_gmii_gs40g_writereg;
   10279 	} else if (sc->sc_type >= WM_T_82580) {
   10280 		/* 82580, I350 and I354 */
   10281 		new_readreg = wm_gmii_82580_readreg;
   10282 		new_writereg = wm_gmii_82580_writereg;
   10283 	} else if (sc->sc_type >= WM_T_82544) {
    10284 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   10285 		new_readreg = wm_gmii_i82544_readreg;
   10286 		new_writereg = wm_gmii_i82544_writereg;
   10287 	} else {
   10288 		new_readreg = wm_gmii_i82543_readreg;
   10289 		new_writereg = wm_gmii_i82543_writereg;
   10290 	}
   10291 
   10292 	if (new_phytype == WMPHY_BM) {
   10293 		/* All BM use _bm_ */
   10294 		new_readreg = wm_gmii_bm_readreg;
   10295 		new_writereg = wm_gmii_bm_writereg;
   10296 	}
   10297 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   10298 		/* All PCH* use _hv_ */
   10299 		new_readreg = wm_gmii_hv_readreg;
   10300 		new_writereg = wm_gmii_hv_writereg;
   10301 	}
   10302 
   10303 	/* Diag output */
   10304 	if (dodiag) {
   10305 		if (doubt_phytype != WMPHY_UNKNOWN)
   10306 			aprint_error_dev(dev, "Assumed new PHY type was "
   10307 			    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   10308 			    new_phytype);
   10309 		else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   10310 		    && (sc->sc_phytype != new_phytype))
   10311 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    10312 			    " was incorrect. New PHY type = %u\n",
   10313 			    sc->sc_phytype, new_phytype);
   10314 
   10315 		if ((mii->mii_readreg != NULL) &&
   10316 		    (new_phytype == WMPHY_UNKNOWN))
   10317 			aprint_error_dev(dev, "PHY type is still unknown.\n");
   10318 
   10319 		if ((mii->mii_readreg != NULL) &&
   10320 		    (mii->mii_readreg != new_readreg))
   10321 			aprint_error_dev(dev, "Previously assumed PHY "
   10322 			    "read/write function was incorrect.\n");
   10323 	}
   10324 
   10325 	/* Update now */
   10326 	sc->sc_phytype = new_phytype;
   10327 	mii->mii_readreg = new_readreg;
   10328 	mii->mii_writereg = new_writereg;
   10329 	if (new_readreg == wm_gmii_hv_readreg) {
   10330 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   10331 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   10332 	} else if (new_readreg == wm_sgmii_readreg) {
   10333 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   10334 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   10335 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   10336 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   10337 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   10338 	}
   10339 }
   10340 
   10341 /*
   10342  * wm_get_phy_id_82575:
   10343  *
    10344  * Return the PHY ID, or -1 on failure.
   10345  */
   10346 static int
   10347 wm_get_phy_id_82575(struct wm_softc *sc)
   10348 {
   10349 	uint32_t reg;
   10350 	int phyid = -1;
   10351 
   10352 	/* XXX */
   10353 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10354 		return -1;
   10355 
   10356 	if (wm_sgmii_uses_mdio(sc)) {
   10357 		switch (sc->sc_type) {
   10358 		case WM_T_82575:
   10359 		case WM_T_82576:
   10360 			reg = CSR_READ(sc, WMREG_MDIC);
   10361 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10362 			break;
   10363 		case WM_T_82580:
   10364 		case WM_T_I350:
   10365 		case WM_T_I354:
   10366 		case WM_T_I210:
   10367 		case WM_T_I211:
   10368 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10369 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10370 			break;
   10371 		default:
   10372 			return -1;
   10373 		}
   10374 	}
   10375 
   10376 	return phyid;
   10377 }
   10378 
   10379 
   10380 /*
   10381  * wm_gmii_mediainit:
   10382  *
   10383  *	Initialize media for use on 1000BASE-T devices.
   10384  */
   10385 static void
   10386 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10387 {
   10388 	device_t dev = sc->sc_dev;
   10389 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10390 	struct mii_data *mii = &sc->sc_mii;
   10391 
   10392 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10393 		device_xname(sc->sc_dev), __func__));
   10394 
   10395 	/* We have GMII. */
   10396 	sc->sc_flags |= WM_F_HAS_MII;
   10397 
   10398 	if (sc->sc_type == WM_T_80003)
   10399 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10400 	else
   10401 		sc->sc_tipg = TIPG_1000T_DFLT;
   10402 
   10403 	/*
   10404 	 * Let the chip set speed/duplex on its own based on
   10405 	 * signals from the PHY.
   10406 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10407 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10408 	 */
   10409 	sc->sc_ctrl |= CTRL_SLU;
   10410 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10411 
   10412 	/* Initialize our media structures and probe the GMII. */
   10413 	mii->mii_ifp = ifp;
   10414 
   10415 	mii->mii_statchg = wm_gmii_statchg;
   10416 
   10417 	/* get PHY control from SMBus to PCIe */
   10418 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10419 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10420 	    || (sc->sc_type == WM_T_PCH_CNP))
   10421 		wm_init_phy_workarounds_pchlan(sc);
   10422 
   10423 	wm_gmii_reset(sc);
   10424 
   10425 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10426 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10427 	    wm_gmii_mediastatus);
   10428 
   10429 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10430 	    || (sc->sc_type == WM_T_82580)
   10431 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10432 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10433 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10434 			/* Attach only one port */
   10435 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10436 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10437 		} else {
   10438 			int i, id;
   10439 			uint32_t ctrl_ext;
   10440 
   10441 			id = wm_get_phy_id_82575(sc);
   10442 			if (id != -1) {
   10443 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10444 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10445 			}
   10446 			if ((id == -1)
   10447 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10448 				/* Power on the SGMII PHY if it is disabled */
   10449 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10450 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10451 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10452 				CSR_WRITE_FLUSH(sc);
   10453 				delay(300*1000); /* XXX too long */
   10454 
   10455 				/*
    10456 				 * Scan PHY addresses from 1 to 7.
    10457 				 *
    10458 				 * I2C access fails with the I2C register's
    10459 				 * ERROR bit set, so suppress error messages
    10460 				 * while scanning.
   10461 				 */
   10462 				sc->phy.no_errprint = true;
   10463 				for (i = 1; i < 8; i++)
   10464 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10465 					    0xffffffff, i, MII_OFFSET_ANY,
   10466 					    MIIF_DOPAUSE);
   10467 				sc->phy.no_errprint = false;
   10468 
   10469 				/* Restore previous sfp cage power state */
   10470 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10471 			}
   10472 		}
   10473 	} else
   10474 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10475 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10476 
   10477 	/*
    10478 	 * If the MAC is PCH2 or newer and failed to detect an MII PHY, call
   10479 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10480 	 */
   10481 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10482 		|| (sc->sc_type == WM_T_PCH_SPT)
   10483 		|| (sc->sc_type == WM_T_PCH_CNP))
   10484 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10485 		wm_set_mdio_slow_mode_hv(sc);
   10486 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10487 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10488 	}
   10489 
   10490 	/*
   10491 	 * (For ICH8 variants)
   10492 	 * If PHY detection failed, use BM's r/w function and retry.
   10493 	 */
   10494 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10495 		/* if failed, retry with *_bm_* */
   10496 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10497 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10498 		    sc->sc_phytype);
   10499 		sc->sc_phytype = WMPHY_BM;
   10500 		mii->mii_readreg = wm_gmii_bm_readreg;
   10501 		mii->mii_writereg = wm_gmii_bm_writereg;
   10502 
   10503 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10504 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10505 	}
   10506 
   10507 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10508 		/* No PHY was found */
   10509 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10510 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10511 		sc->sc_phytype = WMPHY_NONE;
   10512 	} else {
   10513 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10514 
   10515 		/*
   10516 		 * PHY Found! Check PHY type again by the second call of
   10517 		 * wm_gmii_setup_phytype.
   10518 		 */
   10519 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10520 		    child->mii_mpd_model);
   10521 
   10522 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10523 	}
   10524 }
   10525 
   10526 /*
   10527  * wm_gmii_mediachange:	[ifmedia interface function]
   10528  *
   10529  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10530  */
   10531 static int
   10532 wm_gmii_mediachange(struct ifnet *ifp)
   10533 {
   10534 	struct wm_softc *sc = ifp->if_softc;
   10535 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10536 	uint32_t reg;
   10537 	int rc;
   10538 
   10539 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10540 		device_xname(sc->sc_dev), __func__));
   10541 	if ((ifp->if_flags & IFF_UP) == 0)
   10542 		return 0;
   10543 
   10544 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10545 	if ((sc->sc_type == WM_T_82580)
   10546 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10547 	    || (sc->sc_type == WM_T_I211)) {
   10548 		reg = CSR_READ(sc, WMREG_PHPM);
   10549 		reg &= ~PHPM_GO_LINK_D;
   10550 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10551 	}
   10552 
   10553 	/* Disable D0 LPLU. */
   10554 	wm_lplu_d0_disable(sc);
   10555 
   10556 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10557 	sc->sc_ctrl |= CTRL_SLU;
   10558 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10559 	    || (sc->sc_type > WM_T_82543)) {
   10560 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10561 	} else {
   10562 		sc->sc_ctrl &= ~CTRL_ASDE;
   10563 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10564 		if (ife->ifm_media & IFM_FDX)
   10565 			sc->sc_ctrl |= CTRL_FD;
   10566 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10567 		case IFM_10_T:
   10568 			sc->sc_ctrl |= CTRL_SPEED_10;
   10569 			break;
   10570 		case IFM_100_TX:
   10571 			sc->sc_ctrl |= CTRL_SPEED_100;
   10572 			break;
   10573 		case IFM_1000_T:
   10574 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10575 			break;
   10576 		case IFM_NONE:
   10577 			/* There is no specific setting for IFM_NONE */
   10578 			break;
   10579 		default:
   10580 			panic("wm_gmii_mediachange: bad media 0x%x",
   10581 			    ife->ifm_media);
   10582 		}
   10583 	}
   10584 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10585 	CSR_WRITE_FLUSH(sc);
   10586 
   10587 	if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   10588 		wm_serdes_mediachange(ifp);
   10589 
   10590 	if (sc->sc_type <= WM_T_82543)
   10591 		wm_gmii_reset(sc);
   10592 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   10593 	    && ((sc->sc_flags & WM_F_SGMII) != 0)) {
    10594 		/* Allow time for the SFP cage to power up the PHY */
   10595 		delay(300 * 1000);
   10596 		wm_gmii_reset(sc);
   10597 	}
   10598 
   10599 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10600 		return 0;
   10601 	return rc;
   10602 }
   10603 
   10604 /*
   10605  * wm_gmii_mediastatus:	[ifmedia interface function]
   10606  *
   10607  *	Get the current interface media status on a 1000BASE-T device.
   10608  */
   10609 static void
   10610 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10611 {
   10612 	struct wm_softc *sc = ifp->if_softc;
   10613 
   10614 	ether_mediastatus(ifp, ifmr);
   10615 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10616 	    | sc->sc_flowflags;
   10617 }
   10618 
   10619 #define	MDI_IO		CTRL_SWDPIN(2)
   10620 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10621 #define	MDI_CLK		CTRL_SWDPIN(3)
   10622 
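/*
 * On the i82543, MDIO is bit-banged through the CTRL register's
 * software-definable pins: MDI_IO carries the data bit, MDI_CLK is
 * toggled around each bit, and MDI_DIR sets the data pin to output
 * (host -> PHY) for writes. The frames follow IEEE 802.3 clause 22:
 * a 32-bit preamble of ones, then start/opcode/PHY-address/register-
 * address bits, a turnaround, and 16 data bits (see
 * wm_gmii_i82543_readreg()/_writereg() for the exact layout).
 */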
   10623 static void
   10624 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10625 {
   10626 	uint32_t i, v;
   10627 
   10628 	v = CSR_READ(sc, WMREG_CTRL);
   10629 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10630 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10631 
   10632 	for (i = __BIT(nbits - 1); i != 0; i >>= 1) {
   10633 		if (data & i)
   10634 			v |= MDI_IO;
   10635 		else
   10636 			v &= ~MDI_IO;
   10637 		CSR_WRITE(sc, WMREG_CTRL, v);
   10638 		CSR_WRITE_FLUSH(sc);
   10639 		delay(10);
   10640 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10641 		CSR_WRITE_FLUSH(sc);
   10642 		delay(10);
   10643 		CSR_WRITE(sc, WMREG_CTRL, v);
   10644 		CSR_WRITE_FLUSH(sc);
   10645 		delay(10);
   10646 	}
   10647 }
   10648 
   10649 static uint16_t
   10650 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10651 {
   10652 	uint32_t v, i;
   10653 	uint16_t data = 0;
   10654 
   10655 	v = CSR_READ(sc, WMREG_CTRL);
   10656 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10657 	v |= CTRL_SWDPIO(3);
   10658 
   10659 	CSR_WRITE(sc, WMREG_CTRL, v);
   10660 	CSR_WRITE_FLUSH(sc);
   10661 	delay(10);
   10662 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10663 	CSR_WRITE_FLUSH(sc);
   10664 	delay(10);
   10665 	CSR_WRITE(sc, WMREG_CTRL, v);
   10666 	CSR_WRITE_FLUSH(sc);
   10667 	delay(10);
   10668 
   10669 	for (i = 0; i < 16; i++) {
   10670 		data <<= 1;
   10671 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10672 		CSR_WRITE_FLUSH(sc);
   10673 		delay(10);
   10674 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10675 			data |= 1;
   10676 		CSR_WRITE(sc, WMREG_CTRL, v);
   10677 		CSR_WRITE_FLUSH(sc);
   10678 		delay(10);
   10679 	}
   10680 
   10681 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10682 	CSR_WRITE_FLUSH(sc);
   10683 	delay(10);
   10684 	CSR_WRITE(sc, WMREG_CTRL, v);
   10685 	CSR_WRITE_FLUSH(sc);
   10686 	delay(10);
   10687 
   10688 	return data;
   10689 }
   10690 
   10691 #undef MDI_IO
   10692 #undef MDI_DIR
   10693 #undef MDI_CLK
   10694 
   10695 /*
   10696  * wm_gmii_i82543_readreg:	[mii interface function]
   10697  *
   10698  *	Read a PHY register on the GMII (i82543 version).
   10699  */
   10700 static int
   10701 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10702 {
   10703 	struct wm_softc *sc = device_private(dev);
   10704 
   10705 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10706 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10707 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10708 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10709 
   10710 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10711 		device_xname(dev), phy, reg, *val));
   10712 
   10713 	return 0;
   10714 }
   10715 
   10716 /*
   10717  * wm_gmii_i82543_writereg:	[mii interface function]
   10718  *
   10719  *	Write a PHY register on the GMII (i82543 version).
   10720  */
   10721 static int
   10722 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10723 {
   10724 	struct wm_softc *sc = device_private(dev);
   10725 
   10726 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10727 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10728 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10729 	    (MII_COMMAND_START << 30), 32);
   10730 
   10731 	return 0;
   10732 }
   10733 
   10734 /*
   10735  * wm_gmii_mdic_readreg:	[mii interface function]
   10736  *
   10737  *	Read a PHY register on the GMII.
   10738  */
   10739 static int
   10740 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10741 {
   10742 	struct wm_softc *sc = device_private(dev);
   10743 	uint32_t mdic = 0;
   10744 	int i;
   10745 
   10746 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10747 	    && (reg > MII_ADDRMASK)) {
   10748 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10749 		    __func__, sc->sc_phytype, reg);
   10750 		reg &= MII_ADDRMASK;
   10751 	}
   10752 
   10753 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10754 	    MDIC_REGADD(reg));
   10755 
   10756 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10757 		delay(50);
   10758 		mdic = CSR_READ(sc, WMREG_MDIC);
   10759 		if (mdic & MDIC_READY)
   10760 			break;
   10761 	}
   10762 
   10763 	if ((mdic & MDIC_READY) == 0) {
   10764 		DPRINTF(WM_DEBUG_GMII,
   10765 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10766 			device_xname(dev), phy, reg));
   10767 		return ETIMEDOUT;
   10768 	} else if (mdic & MDIC_E) {
   10769 		/* This is normal if no PHY is present. */
   10770 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10771 			device_xname(sc->sc_dev), phy, reg));
   10772 		return -1;
   10773 	} else
   10774 		*val = MDIC_DATA(mdic);
   10775 
   10776 	/*
   10777 	 * Allow some time after each MDIC transaction to avoid
   10778 	 * reading duplicate data in the next MDIC transaction.
   10779 	 */
   10780 	if (sc->sc_type == WM_T_PCH2)
   10781 		delay(100);
   10782 
   10783 	return 0;
   10784 }
   10785 
   10786 /*
   10787  * wm_gmii_mdic_writereg:	[mii interface function]
   10788  *
   10789  *	Write a PHY register on the GMII.
   10790  */
   10791 static int
   10792 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10793 {
   10794 	struct wm_softc *sc = device_private(dev);
   10795 	uint32_t mdic = 0;
   10796 	int i;
   10797 
   10798 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10799 	    && (reg > MII_ADDRMASK)) {
   10800 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10801 		    __func__, sc->sc_phytype, reg);
   10802 		reg &= MII_ADDRMASK;
   10803 	}
   10804 
   10805 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10806 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10807 
   10808 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10809 		delay(50);
   10810 		mdic = CSR_READ(sc, WMREG_MDIC);
   10811 		if (mdic & MDIC_READY)
   10812 			break;
   10813 	}
   10814 
   10815 	if ((mdic & MDIC_READY) == 0) {
   10816 		DPRINTF(WM_DEBUG_GMII,
   10817 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10818 			device_xname(dev), phy, reg));
   10819 		return ETIMEDOUT;
   10820 	} else if (mdic & MDIC_E) {
   10821 		DPRINTF(WM_DEBUG_GMII,
   10822 		    ("%s: MDIC write error: phy %d reg %d\n",
   10823 			device_xname(dev), phy, reg));
   10824 		return -1;
   10825 	}
   10826 
   10827 	/*
   10828 	 * Allow some time after each MDIC transaction to avoid
   10829 	 * reading duplicate data in the next MDIC transaction.
   10830 	 */
   10831 	if (sc->sc_type == WM_T_PCH2)
   10832 		delay(100);
   10833 
   10834 	return 0;
   10835 }
   10836 
   10837 /*
   10838  * wm_gmii_i82544_readreg:	[mii interface function]
   10839  *
   10840  *	Read a PHY register on the GMII.
   10841  */
   10842 static int
   10843 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10844 {
   10845 	struct wm_softc *sc = device_private(dev);
   10846 	int rv;
   10847 
   10848 	if (sc->phy.acquire(sc)) {
   10849 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10850 		return -1;
   10851 	}
   10852 
   10853 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10854 
   10855 	sc->phy.release(sc);
   10856 
   10857 	return rv;
   10858 }
   10859 
   10860 static int
   10861 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10862 {
   10863 	struct wm_softc *sc = device_private(dev);
   10864 	int rv;
   10865 
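	/*
	 * Registers above BME1000_MAX_MULTI_PAGE_REG carry a page number in
	 * their upper bits; IGP-family PHYs need that page written to
	 * MII_IGPHY_PAGE_SELECT before accessing (reg & MII_ADDRMASK).
	 */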
   10866 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10867 		switch (sc->sc_phytype) {
   10868 		case WMPHY_IGP:
   10869 		case WMPHY_IGP_2:
   10870 		case WMPHY_IGP_3:
   10871 			rv = wm_gmii_mdic_writereg(dev, phy,
   10872 			    MII_IGPHY_PAGE_SELECT, reg);
   10873 			if (rv != 0)
   10874 				return rv;
   10875 			break;
   10876 		default:
   10877 #ifdef WM_DEBUG
   10878 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10879 			    __func__, sc->sc_phytype, reg);
   10880 #endif
   10881 			break;
   10882 		}
   10883 	}
   10884 
   10885 	return wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10886 }
   10887 
   10888 /*
   10889  * wm_gmii_i82544_writereg:	[mii interface function]
   10890  *
   10891  *	Write a PHY register on the GMII.
   10892  */
   10893 static int
   10894 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10895 {
   10896 	struct wm_softc *sc = device_private(dev);
   10897 	int rv;
   10898 
   10899 	if (sc->phy.acquire(sc)) {
   10900 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10901 		return -1;
   10902 	}
   10903 
   10904 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10905 	sc->phy.release(sc);
   10906 
   10907 	return rv;
   10908 }
   10909 
   10910 static int
   10911 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10912 {
   10913 	struct wm_softc *sc = device_private(dev);
   10914 	int rv;
   10915 
   10916 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10917 		switch (sc->sc_phytype) {
   10918 		case WMPHY_IGP:
   10919 		case WMPHY_IGP_2:
   10920 		case WMPHY_IGP_3:
   10921 			rv = wm_gmii_mdic_writereg(dev, phy,
   10922 			    MII_IGPHY_PAGE_SELECT, reg);
   10923 			if (rv != 0)
   10924 				return rv;
   10925 			break;
   10926 		default:
   10927 #ifdef WM_DEBUG
    10928 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10929 			    __func__, sc->sc_phytype, reg);
   10930 #endif
   10931 			break;
   10932 		}
   10933 	}
   10934 
   10935 	return wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10936 }
   10937 
   10938 /*
   10939  * wm_gmii_i80003_readreg:	[mii interface function]
   10940  *
    10941  *	Read a PHY register on the Kumeran bus.
    10942  * This could be handled by the PHY layer if we didn't have to lock the
    10943  * resource ...
   10944  */
   10945 static int
   10946 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10947 {
   10948 	struct wm_softc *sc = device_private(dev);
   10949 	int page_select;
   10950 	uint16_t temp, temp2;
   10951 	int rv = 0;
   10952 
   10953 	if (phy != 1) /* Only one PHY on kumeran bus */
   10954 		return -1;
   10955 
   10956 	if (sc->phy.acquire(sc)) {
   10957 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10958 		return -1;
   10959 	}
   10960 
   10961 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10962 		page_select = GG82563_PHY_PAGE_SELECT;
   10963 	else {
   10964 		/*
   10965 		 * Use Alternative Page Select register to access registers
   10966 		 * 30 and 31.
   10967 		 */
   10968 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10969 	}
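	/*
	 * The upper bits of 'reg' hold the GG82563 page number. Write it to
	 * the selected page-select register first; the actual access then
	 * uses only the low bits (reg & MII_ADDRMASK) within that page.
	 */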
   10970 	temp = reg >> GG82563_PAGE_SHIFT;
   10971 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10972 		goto out;
   10973 
   10974 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10975 		/*
    10976 		 * Wait another 200us to work around a bug in the ready bit
    10977 		 * of the MDIC register.
   10978 		 */
   10979 		delay(200);
   10980 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10981 		if ((rv != 0) || (temp2 != temp)) {
   10982 			device_printf(dev, "%s failed\n", __func__);
   10983 			rv = -1;
   10984 			goto out;
   10985 		}
   10986 		delay(200);
   10987 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10988 		delay(200);
   10989 	} else
   10990 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10991 
   10992 out:
   10993 	sc->phy.release(sc);
   10994 	return rv;
   10995 }
   10996 
   10997 /*
   10998  * wm_gmii_i80003_writereg:	[mii interface function]
   10999  *
    11000  *	Write a PHY register on the Kumeran bus.
    11001  * This could be handled by the PHY layer if we didn't have to lock the
    11002  * resource ...
   11003  */
   11004 static int
   11005 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   11006 {
   11007 	struct wm_softc *sc = device_private(dev);
   11008 	int page_select, rv;
   11009 	uint16_t temp, temp2;
   11010 
   11011 	if (phy != 1) /* Only one PHY on kumeran bus */
   11012 		return -1;
   11013 
   11014 	if (sc->phy.acquire(sc)) {
   11015 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11016 		return -1;
   11017 	}
   11018 
   11019 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   11020 		page_select = GG82563_PHY_PAGE_SELECT;
   11021 	else {
   11022 		/*
   11023 		 * Use Alternative Page Select register to access registers
   11024 		 * 30 and 31.
   11025 		 */
   11026 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   11027 	}
   11028 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   11029 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   11030 		goto out;
   11031 
   11032 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   11033 		/*
    11034 		 * Wait another 200us to work around a bug in the ready bit
    11035 		 * of the MDIC register.
   11036 		 */
   11037 		delay(200);
   11038 		rv = wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   11039 		if ((rv != 0) || (temp2 != temp)) {
   11040 			device_printf(dev, "%s failed\n", __func__);
   11041 			rv = -1;
   11042 			goto out;
   11043 		}
   11044 		delay(200);
   11045 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11046 		delay(200);
   11047 	} else
   11048 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11049 
   11050 out:
   11051 	sc->phy.release(sc);
   11052 	return rv;
   11053 }
   11054 
   11055 /*
   11056  * wm_gmii_bm_readreg:	[mii interface function]
   11057  *
    11058  *	Read a PHY register on the BM PHY.
    11059  * This could be handled by the PHY layer if we didn't have to lock the
    11060  * resource ...
   11061  */
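/*
 * BM register numbers encode the page in their upper bits:
 * reg = (page << BME1000_PAGE_SHIFT) | addr. Page 800 (BM_WUC_PAGE)
 * holds the wakeup registers and is handled separately by
 * wm_access_phy_wakeup_reg_bm(); for example, BM_WUC_ENABLE_REG lives
 * at page 769, register 17.
 */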
   11062 static int
   11063 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11064 {
   11065 	struct wm_softc *sc = device_private(dev);
   11066 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11067 	int rv;
   11068 
   11069 	if (sc->phy.acquire(sc)) {
   11070 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11071 		return -1;
   11072 	}
   11073 
   11074 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11075 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11076 		    || (reg == 31)) ? 1 : phy;
   11077 	/* Page 800 works differently than the rest so it has its own func */
   11078 	if (page == BM_WUC_PAGE) {
   11079 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11080 		goto release;
   11081 	}
   11082 
   11083 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11084 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11085 		    && (sc->sc_type != WM_T_82583))
   11086 			rv = wm_gmii_mdic_writereg(dev, phy,
   11087 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11088 		else
   11089 			rv = wm_gmii_mdic_writereg(dev, phy,
   11090 			    BME1000_PHY_PAGE_SELECT, page);
   11091 		if (rv != 0)
   11092 			goto release;
   11093 	}
   11094 
   11095 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   11096 
   11097 release:
   11098 	sc->phy.release(sc);
   11099 	return rv;
   11100 }
   11101 
   11102 /*
   11103  * wm_gmii_bm_writereg:	[mii interface function]
   11104  *
    11105  *	Write a PHY register on the BM PHY.
    11106  * This could be handled by the PHY layer if we didn't have to lock the
    11107  * resource ...
   11108  */
   11109 static int
   11110 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   11111 {
   11112 	struct wm_softc *sc = device_private(dev);
   11113 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   11114 	int rv;
   11115 
   11116 	if (sc->phy.acquire(sc)) {
   11117 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11118 		return -1;
   11119 	}
   11120 
   11121 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   11122 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   11123 		    || (reg == 31)) ? 1 : phy;
   11124 	/* Page 800 works differently than the rest so it has its own func */
   11125 	if (page == BM_WUC_PAGE) {
   11126 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   11127 		goto release;
   11128 	}
   11129 
   11130 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   11131 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   11132 		    && (sc->sc_type != WM_T_82583))
   11133 			rv = wm_gmii_mdic_writereg(dev, phy,
   11134 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11135 		else
   11136 			rv = wm_gmii_mdic_writereg(dev, phy,
   11137 			    BME1000_PHY_PAGE_SELECT, page);
   11138 		if (rv != 0)
   11139 			goto release;
   11140 	}
   11141 
   11142 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   11143 
   11144 release:
   11145 	sc->phy.release(sc);
   11146 	return rv;
   11147 }
   11148 
   11149 /*
   11150  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   11151  *  @dev: pointer to the HW structure
   11152  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   11153  *
   11154  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   11155  *  address to store contents of the BM_WUC_ENABLE_REG register.
   11156  */
   11157 static int
   11158 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11159 {
   11160 	uint16_t temp;
   11161 	int rv;
   11162 
   11163 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11164 		device_xname(dev), __func__));
   11165 
   11166 	if (!phy_regp)
   11167 		return -1;
   11168 
   11169 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   11170 
   11171 	/* Select Port Control Registers page */
   11172 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11173 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11174 	if (rv != 0)
   11175 		return rv;
   11176 
   11177 	/* Read WUCE and save it */
   11178 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   11179 	if (rv != 0)
   11180 		return rv;
   11181 
   11182 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   11183 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   11184 	 */
   11185 	temp = *phy_regp;
   11186 	temp |= BM_WUC_ENABLE_BIT;
   11187 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   11188 
   11189 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   11190 		return rv;
   11191 
   11192 	/* Select Host Wakeup Registers page - caller now able to write
   11193 	 * registers on the Wakeup registers page
   11194 	 */
   11195 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11196 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   11197 }
   11198 
   11199 /*
   11200  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   11201  *  @dev: pointer to the HW structure
   11202  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   11203  *
   11204  *  Restore BM_WUC_ENABLE_REG to its original value.
   11205  *
   11206  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   11207  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   11208  *  caller.
   11209  */
   11210 static int
   11211 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   11212 {
   11213 
   11214 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11215 		device_xname(dev), __func__));
   11216 
   11217 	if (!phy_regp)
   11218 		return -1;
   11219 
   11220 	/* Select Port Control Registers page */
   11221 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11222 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   11223 
   11224 	/* Restore 769.17 to its original value */
   11225 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   11226 
   11227 	return 0;
   11228 }
   11229 
   11230 /*
   11231  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    11232  *  @dev: pointer to the HW structure
   11233  *  @offset: register offset to be read or written
   11234  *  @val: pointer to the data to read or write
   11235  *  @rd: determines if operation is read or write
   11236  *  @page_set: BM_WUC_PAGE already set and access enabled
   11237  *
   11238  *  Read the PHY register at offset and store the retrieved information in
   11239  *  data, or write data to PHY register at offset.  Note the procedure to
   11240  *  access the PHY wakeup registers is different than reading the other PHY
   11241  *  registers. It works as such:
   11242  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
    11243  *  2) Set page to 800 for host access (801 for manageability firmware)
   11244  *  3) Write the address using the address opcode (0x11)
   11245  *  4) Read or write the data using the data opcode (0x12)
   11246  *  5) Restore 769.17.2 to its original value
   11247  *
   11248  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   11249  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   11250  *
   11251  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   11252  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
    11253  * is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_access_bm()).
   11254  */
   11255 static int
    11256 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd,
   11257 	bool page_set)
   11258 {
   11259 	struct wm_softc *sc = device_private(dev);
   11260 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   11261 	uint16_t page = BM_PHY_REG_PAGE(offset);
   11262 	uint16_t wuce;
   11263 	int rv = 0;
   11264 
   11265 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11266 		device_xname(dev), __func__));
   11267 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   11268 	if ((sc->sc_type == WM_T_PCH)
   11269 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   11270 		device_printf(dev,
   11271 		    "Attempting to access page %d while gig enabled.\n", page);
   11272 	}
   11273 
   11274 	if (!page_set) {
   11275 		/* Enable access to PHY wakeup registers */
   11276 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   11277 		if (rv != 0) {
   11278 			device_printf(dev,
   11279 			    "%s: Could not enable PHY wakeup reg access\n",
   11280 			    __func__);
   11281 			return rv;
   11282 		}
   11283 	}
   11284 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   11285 		device_xname(sc->sc_dev), __func__, page, regnum));
   11286 
    11287 	/*
    11288 	 * Steps 3) and 4): access the PHY wakeup register.
    11289 	 * See the procedure in the function header comment above.
    11290 	 */
   11291 
   11292 	/* Write the Wakeup register page offset value using opcode 0x11 */
   11293 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   11294 	if (rv != 0)
   11295 		return rv;
   11296 
   11297 	if (rd) {
   11298 		/* Read the Wakeup register page value using opcode 0x12 */
   11299 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   11300 	} else {
   11301 		/* Write the Wakeup register page value using opcode 0x12 */
   11302 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   11303 	}
   11304 	if (rv != 0)
   11305 		return rv;
   11306 
   11307 	if (!page_set)
   11308 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   11309 
   11310 	return rv;
   11311 }
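
          /*
           * Usage sketch, assuming the caller holds the PHY semaphore:
           * reading a wakeup register is a single call,
           *
           *	rv = wm_access_phy_wakeup_reg_bm(dev, offset, &val, true,
           *	    false);
           *
           * where "offset" decodes via BM_PHY_REG_PAGE() to BM_WUC_PAGE;
           * with page_set == false this performs steps 1-5 above, including
           * the enable/disable bracketing.
           */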
   11312 
   11313 /*
   11314  * wm_gmii_hv_readreg:	[mii interface function]
   11315  *
    11316  *	Read a PHY register on the HV PHY.
    11317  * This could be handled by the PHY layer if we didn't have to lock the
    11318  * resource ...
   11319  */
   11320 static int
   11321 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11322 {
   11323 	struct wm_softc *sc = device_private(dev);
   11324 	int rv;
   11325 
   11326 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11327 		device_xname(dev), __func__));
   11328 	if (sc->phy.acquire(sc)) {
   11329 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11330 		return -1;
   11331 	}
   11332 
   11333 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   11334 	sc->phy.release(sc);
   11335 	return rv;
   11336 }
   11337 
   11338 static int
   11339 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11340 {
   11341 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11342 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11343 	int rv;
   11344 
   11345 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11346 
   11347 	/* Page 800 works differently than the rest so it has its own func */
   11348 	if (page == BM_WUC_PAGE)
   11349 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   11350 
   11351 	/*
   11352 	 * Lower than page 768 works differently than the rest so it has its
   11353 	 * own func
   11354 	 */
   11355 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11356 		device_printf(dev, "gmii_hv_readreg!!!\n");
   11357 		return -1;
   11358 	}
   11359 
   11360 	/*
   11361 	 * XXX I21[789] documents say that the SMBus Address register is at
   11362 	 * PHY address 01, Page 0 (not 768), Register 26.
   11363 	 */
   11364 	if (page == HV_INTC_FC_PAGE_START)
   11365 		page = 0;
   11366 
   11367 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11368 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11369 		    page << BME1000_PAGE_SHIFT);
   11370 		if (rv != 0)
   11371 			return rv;
   11372 	}
   11373 
   11374 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11375 }
   11376 
   11377 /*
   11378  * wm_gmii_hv_writereg:	[mii interface function]
   11379  *
    11380  *	Write a PHY register on the HV PHY.
    11381  * This could be handled by the PHY layer if we didn't have to lock the
    11382  * resource ...
   11383  */
   11384 static int
   11385 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11386 {
   11387 	struct wm_softc *sc = device_private(dev);
   11388 	int rv;
   11389 
   11390 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11391 		device_xname(dev), __func__));
   11392 
   11393 	if (sc->phy.acquire(sc)) {
   11394 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11395 		return -1;
   11396 	}
   11397 
   11398 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11399 	sc->phy.release(sc);
   11400 
   11401 	return rv;
   11402 }
   11403 
   11404 static int
   11405 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11406 {
   11407 	struct wm_softc *sc = device_private(dev);
   11408 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11409 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11410 	int rv;
   11411 
   11412 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11413 
   11414 	/* Page 800 works differently than the rest so it has its own func */
   11415 	if (page == BM_WUC_PAGE)
   11416 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11417 		    false);
   11418 
   11419 	/*
   11420 	 * Lower than page 768 works differently than the rest so it has its
   11421 	 * own func
   11422 	 */
   11423 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11424 		device_printf(dev, "gmii_hv_writereg!!!\n");
   11425 		return -1;
   11426 	}
   11427 
   11428 	{
   11429 		/*
   11430 		 * XXX I21[789] documents say that the SMBus Address register
   11431 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11432 		 */
   11433 		if (page == HV_INTC_FC_PAGE_START)
   11434 			page = 0;
   11435 
   11436 		/*
    11437 		 * XXX Work around MDIO accesses being disabled after entering
   11438 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11439 		 * register is set)
   11440 		 */
   11441 		if (sc->sc_phytype == WMPHY_82578) {
   11442 			struct mii_softc *child;
   11443 
   11444 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11445 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11446 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11447 			    && ((val & (1 << 11)) != 0)) {
   11448 				device_printf(dev, "XXX need workaround\n");
   11449 			}
   11450 		}
   11451 
   11452 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11453 			rv = wm_gmii_mdic_writereg(dev, 1,
   11454 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11455 			if (rv != 0)
   11456 				return rv;
   11457 		}
   11458 	}
   11459 
   11460 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11461 }
   11462 
   11463 /*
   11464  * wm_gmii_82580_readreg:	[mii interface function]
   11465  *
   11466  *	Read a PHY register on the 82580 and I350.
   11467  * This could be handled by the PHY layer if we didn't have to lock the
    11468  * resource ...
   11469  */
   11470 static int
   11471 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11472 {
   11473 	struct wm_softc *sc = device_private(dev);
   11474 	int rv;
   11475 
   11476 	if (sc->phy.acquire(sc) != 0) {
   11477 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11478 		return -1;
   11479 	}
   11480 
   11481 #ifdef DIAGNOSTIC
   11482 	if (reg > MII_ADDRMASK) {
   11483 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11484 		    __func__, sc->sc_phytype, reg);
   11485 		reg &= MII_ADDRMASK;
   11486 	}
   11487 #endif
   11488 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11489 
   11490 	sc->phy.release(sc);
   11491 	return rv;
   11492 }
   11493 
   11494 /*
   11495  * wm_gmii_82580_writereg:	[mii interface function]
   11496  *
   11497  *	Write a PHY register on the 82580 and I350.
   11498  * This could be handled by the PHY layer if we didn't have to lock the
    11499  * resource ...
   11500  */
   11501 static int
   11502 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11503 {
   11504 	struct wm_softc *sc = device_private(dev);
   11505 	int rv;
   11506 
   11507 	if (sc->phy.acquire(sc) != 0) {
   11508 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11509 		return -1;
   11510 	}
   11511 
   11512 #ifdef DIAGNOSTIC
   11513 	if (reg > MII_ADDRMASK) {
   11514 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11515 		    __func__, sc->sc_phytype, reg);
   11516 		reg &= MII_ADDRMASK;
   11517 	}
   11518 #endif
   11519 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11520 
   11521 	sc->phy.release(sc);
   11522 	return rv;
   11523 }
   11524 
   11525 /*
   11526  * wm_gmii_gs40g_readreg:	[mii interface function]
   11527  *
    11528  *	Read a PHY register on the I210 and I211.
    11529  * This could be handled by the PHY layer if we didn't have to lock the
    11530  * resource ...
   11531  */
   11532 static int
   11533 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11534 {
   11535 	struct wm_softc *sc = device_private(dev);
   11536 	int page, offset;
   11537 	int rv;
   11538 
   11539 	/* Acquire semaphore */
   11540 	if (sc->phy.acquire(sc)) {
   11541 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11542 		return -1;
   11543 	}
   11544 
   11545 	/* Page select */
   11546 	page = reg >> GS40G_PAGE_SHIFT;
   11547 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11548 	if (rv != 0)
   11549 		goto release;
   11550 
   11551 	/* Read reg */
   11552 	offset = reg & GS40G_OFFSET_MASK;
   11553 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11554 
   11555 release:
   11556 	sc->phy.release(sc);
   11557 	return rv;
   11558 }
   11559 
   11560 /*
   11561  * wm_gmii_gs40g_writereg:	[mii interface function]
   11562  *
   11563  *	Write a PHY register on the I210 and I211.
   11564  * This could be handled by the PHY layer if we didn't have to lock the
    11565  * resource ...
   11566  */
   11567 static int
   11568 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11569 {
   11570 	struct wm_softc *sc = device_private(dev);
   11571 	uint16_t page;
   11572 	int offset, rv;
   11573 
   11574 	/* Acquire semaphore */
   11575 	if (sc->phy.acquire(sc)) {
   11576 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11577 		return -1;
   11578 	}
   11579 
   11580 	/* Page select */
   11581 	page = reg >> GS40G_PAGE_SHIFT;
   11582 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11583 	if (rv != 0)
   11584 		goto release;
   11585 
   11586 	/* Write reg */
   11587 	offset = reg & GS40G_OFFSET_MASK;
   11588 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11589 
   11590 release:
   11591 	/* Release semaphore */
   11592 	sc->phy.release(sc);
   11593 	return rv;
   11594 }
   11595 
   11596 /*
   11597  * wm_gmii_statchg:	[mii interface function]
   11598  *
   11599  *	Callback from MII layer when media changes.
   11600  */
   11601 static void
   11602 wm_gmii_statchg(struct ifnet *ifp)
   11603 {
   11604 	struct wm_softc *sc = ifp->if_softc;
   11605 	struct mii_data *mii = &sc->sc_mii;
   11606 
   11607 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11608 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11609 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11610 
   11611 	/* Get flow control negotiation result. */
   11612 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11613 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11614 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11615 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11616 	}
   11617 
   11618 	if (sc->sc_flowflags & IFM_FLOW) {
   11619 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11620 			sc->sc_ctrl |= CTRL_TFCE;
   11621 			sc->sc_fcrtl |= FCRTL_XONE;
   11622 		}
   11623 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11624 			sc->sc_ctrl |= CTRL_RFCE;
   11625 	}
   11626 
   11627 	if (mii->mii_media_active & IFM_FDX) {
   11628 		DPRINTF(WM_DEBUG_LINK,
   11629 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11630 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11631 	} else {
   11632 		DPRINTF(WM_DEBUG_LINK,
   11633 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11634 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11635 	}
   11636 
   11637 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11638 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11639 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11640 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11641 	if (sc->sc_type == WM_T_80003) {
   11642 		switch (IFM_SUBTYPE(mii->mii_media_active)) {
   11643 		case IFM_1000_T:
   11644 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11645 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11646 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11647 			break;
   11648 		default:
   11649 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11650 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11651 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   11652 			break;
   11653 		}
   11654 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11655 	}
   11656 }
   11657 
   11658 /* kumeran related (80003, ICH* and PCH*) */
   11659 
   11660 /*
   11661  * wm_kmrn_readreg:
   11662  *
   11663  *	Read a kumeran register
   11664  */
   11665 static int
   11666 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11667 {
   11668 	int rv;
   11669 
   11670 	if (sc->sc_type == WM_T_80003)
   11671 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11672 	else
   11673 		rv = sc->phy.acquire(sc);
   11674 	if (rv != 0) {
   11675 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11676 		    __func__);
   11677 		return rv;
   11678 	}
   11679 
   11680 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11681 
   11682 	if (sc->sc_type == WM_T_80003)
   11683 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11684 	else
   11685 		sc->phy.release(sc);
   11686 
   11687 	return rv;
   11688 }
   11689 
   11690 static int
   11691 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11692 {
   11693 
   11694 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11695 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11696 	    KUMCTRLSTA_REN);
   11697 	CSR_WRITE_FLUSH(sc);
   11698 	delay(2);
   11699 
   11700 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11701 
   11702 	return 0;
   11703 }
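
          /*
           * The KUMCTRLSTA register packs the target offset (shifted into
           * place with KUMCTRLSTA_OFFSET_SHIFT and masked by
           * KUMCTRLSTA_OFFSET) together with either the REN bit to request
           * a read or the 16-bit data for a write; a read result appears in
           * the low bits (KUMCTRLSTA_MASK) after a short delay, as above.
           */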
   11704 
   11705 /*
   11706  * wm_kmrn_writereg:
   11707  *
   11708  *	Write a kumeran register
   11709  */
   11710 static int
   11711 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11712 {
   11713 	int rv;
   11714 
   11715 	if (sc->sc_type == WM_T_80003)
   11716 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11717 	else
   11718 		rv = sc->phy.acquire(sc);
   11719 	if (rv != 0) {
   11720 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11721 		    __func__);
   11722 		return rv;
   11723 	}
   11724 
   11725 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11726 
   11727 	if (sc->sc_type == WM_T_80003)
   11728 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11729 	else
   11730 		sc->phy.release(sc);
   11731 
   11732 	return rv;
   11733 }
   11734 
   11735 static int
   11736 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11737 {
   11738 
   11739 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11740 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11741 
   11742 	return 0;
   11743 }
   11744 
   11745 /*
   11746  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11747  * This access method is different from IEEE MMD.
   11748  */
   11749 static int
   11750 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11751 {
   11752 	struct wm_softc *sc = device_private(dev);
   11753 	int rv;
   11754 
   11755 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11756 	if (rv != 0)
   11757 		return rv;
   11758 
   11759 	if (rd)
   11760 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11761 	else
   11762 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11763 	return rv;
   11764 }
   11765 
   11766 static int
   11767 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11768 {
   11769 
   11770 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11771 }
   11772 
   11773 static int
   11774 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11775 {
   11776 
   11777 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11778 }
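
          /*
           * Usage sketch: the EMI registers form an indirect address/data
           * pair on PHY address 2. For example,
           *
           *	rv = wm_read_emi_reg_locked(dev, reg, &val);
           *
           * first writes "reg" to I82579_EMI_ADDR and then reads the result
           * from I82579_EMI_DATA; the caller must already hold the PHY
           * semaphore.
           */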
   11779 
   11780 /* SGMII related */
   11781 
   11782 /*
   11783  * wm_sgmii_uses_mdio
   11784  *
   11785  * Check whether the transaction is to the internal PHY or the external
   11786  * MDIO interface. Return true if it's MDIO.
   11787  */
   11788 static bool
   11789 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11790 {
   11791 	uint32_t reg;
   11792 	bool ismdio = false;
   11793 
   11794 	switch (sc->sc_type) {
   11795 	case WM_T_82575:
   11796 	case WM_T_82576:
   11797 		reg = CSR_READ(sc, WMREG_MDIC);
   11798 		ismdio = ((reg & MDIC_DEST) != 0);
   11799 		break;
   11800 	case WM_T_82580:
   11801 	case WM_T_I350:
   11802 	case WM_T_I354:
   11803 	case WM_T_I210:
   11804 	case WM_T_I211:
   11805 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11806 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11807 		break;
   11808 	default:
   11809 		break;
   11810 	}
   11811 
   11812 	return ismdio;
   11813 }
   11814 
   11815 /*
   11816  * wm_sgmii_readreg:	[mii interface function]
   11817  *
   11818  *	Read a PHY register on the SGMII
   11819  * This could be handled by the PHY layer if we didn't have to lock the
    11820  * resource ...
   11821  */
   11822 static int
   11823 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11824 {
   11825 	struct wm_softc *sc = device_private(dev);
   11826 	int rv;
   11827 
   11828 	if (sc->phy.acquire(sc)) {
   11829 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11830 		return -1;
   11831 	}
   11832 
   11833 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11834 
   11835 	sc->phy.release(sc);
   11836 	return rv;
   11837 }
   11838 
   11839 static int
   11840 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11841 {
   11842 	struct wm_softc *sc = device_private(dev);
   11843 	uint32_t i2ccmd;
   11844 	int i, rv = 0;
   11845 
   11846 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11847 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11848 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11849 
   11850 	/* Poll the ready bit */
   11851 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11852 		delay(50);
   11853 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11854 		if (i2ccmd & I2CCMD_READY)
   11855 			break;
   11856 	}
   11857 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11858 		device_printf(dev, "I2CCMD Read did not complete\n");
   11859 		rv = ETIMEDOUT;
   11860 	}
   11861 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11862 		if (!sc->phy.no_errprint)
   11863 			device_printf(dev, "I2CCMD Error bit set\n");
   11864 		rv = EIO;
   11865 	}
   11866 
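          	/*
          	 * The 16-bit value comes back in the low 16 bits of I2CCMD
          	 * with its bytes in I2C (big-endian) order; swap them back
          	 * to host order.
          	 */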
   11867 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11868 
   11869 	return rv;
   11870 }
   11871 
   11872 /*
   11873  * wm_sgmii_writereg:	[mii interface function]
   11874  *
   11875  *	Write a PHY register on the SGMII.
   11876  * This could be handled by the PHY layer if we didn't have to lock the
    11877  * resource ...
   11878  */
   11879 static int
   11880 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11881 {
   11882 	struct wm_softc *sc = device_private(dev);
   11883 	int rv;
   11884 
   11885 	if (sc->phy.acquire(sc) != 0) {
   11886 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11887 		return -1;
   11888 	}
   11889 
   11890 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11891 
   11892 	sc->phy.release(sc);
   11893 
   11894 	return rv;
   11895 }
   11896 
   11897 static int
   11898 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11899 {
   11900 	struct wm_softc *sc = device_private(dev);
   11901 	uint32_t i2ccmd;
   11902 	uint16_t swapdata;
   11903 	int rv = 0;
   11904 	int i;
   11905 
   11906 	/* Swap the data bytes for the I2C interface */
   11907 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11908 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11909 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11910 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11911 
   11912 	/* Poll the ready bit */
   11913 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11914 		delay(50);
   11915 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11916 		if (i2ccmd & I2CCMD_READY)
   11917 			break;
   11918 	}
   11919 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11920 		device_printf(dev, "I2CCMD Write did not complete\n");
   11921 		rv = ETIMEDOUT;
   11922 	}
   11923 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11924 		device_printf(dev, "I2CCMD Error bit set\n");
   11925 		rv = EIO;
   11926 	}
   11927 
   11928 	return rv;
   11929 }
   11930 
   11931 /* TBI related */
   11932 
   11933 static bool
   11934 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11935 {
   11936 	bool sig;
   11937 
   11938 	sig = ctrl & CTRL_SWDPIN(1);
   11939 
   11940 	/*
   11941 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11942 	 * detect a signal, 1 if they don't.
   11943 	 */
   11944 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11945 		sig = !sig;
   11946 
   11947 	return sig;
   11948 }
   11949 
   11950 /*
   11951  * wm_tbi_mediainit:
   11952  *
   11953  *	Initialize media for use on 1000BASE-X devices.
   11954  */
   11955 static void
   11956 wm_tbi_mediainit(struct wm_softc *sc)
   11957 {
   11958 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11959 	const char *sep = "";
   11960 
   11961 	if (sc->sc_type < WM_T_82543)
   11962 		sc->sc_tipg = TIPG_WM_DFLT;
   11963 	else
   11964 		sc->sc_tipg = TIPG_LG_DFLT;
   11965 
   11966 	sc->sc_tbi_serdes_anegticks = 5;
   11967 
   11968 	/* Initialize our media structures */
   11969 	sc->sc_mii.mii_ifp = ifp;
   11970 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11971 
   11972 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11973 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11974 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11975 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11976 	else
   11977 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11978 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11979 
   11980 	/*
   11981 	 * SWD Pins:
   11982 	 *
   11983 	 *	0 = Link LED (output)
   11984 	 *	1 = Loss Of Signal (input)
   11985 	 */
   11986 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11987 
   11988 	/* XXX Perhaps this is only for TBI */
   11989 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11990 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11991 
   11992 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11993 		sc->sc_ctrl &= ~CTRL_LRST;
   11994 
   11995 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11996 
   11997 #define	ADD(ss, mm, dd)							\
   11998 do {									\
   11999 	aprint_normal("%s%s", sep, ss);					\
   12000 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   12001 	sep = ", ";							\
   12002 } while (/*CONSTCOND*/0)
   12003 
   12004 	aprint_normal_dev(sc->sc_dev, "");
   12005 
   12006 	if (sc->sc_type == WM_T_I354) {
   12007 		uint32_t status;
   12008 
   12009 		status = CSR_READ(sc, WMREG_STATUS);
   12010 		if (((status & STATUS_2P5_SKU) != 0)
   12011 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    12012 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    12013 		} else
    12014 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   12015 	} else if (sc->sc_type == WM_T_82545) {
   12016 		/* Only 82545 is LX (XXX except SFP) */
   12017 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12018 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12019 	} else if (sc->sc_sfptype != 0) {
   12020 		/* XXX wm(4) fiber/serdes don't use ifm_data */
   12021 		switch (sc->sc_sfptype) {
   12022 		default:
   12023 		case SFF_SFP_ETH_FLAGS_1000SX:
   12024 			ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12025 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12026 			break;
   12027 		case SFF_SFP_ETH_FLAGS_1000LX:
   12028 			ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   12029 			ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   12030 			break;
   12031 		case SFF_SFP_ETH_FLAGS_1000CX:
   12032 			ADD("1000baseCX", IFM_1000_CX, ANAR_X_HD);
   12033 			ADD("1000baseCX-FDX", IFM_1000_CX | IFM_FDX, ANAR_X_FD);
   12034 			break;
   12035 		case SFF_SFP_ETH_FLAGS_1000T:
   12036 			ADD("1000baseT", IFM_1000_T, 0);
   12037 			ADD("1000baseT-FDX", IFM_1000_T | IFM_FDX, 0);
   12038 			break;
   12039 		case SFF_SFP_ETH_FLAGS_100FX:
   12040 			ADD("100baseFX", IFM_100_FX, ANAR_TX);
   12041 			ADD("100baseFX-FDX", IFM_100_FX | IFM_FDX, ANAR_TX_FD);
   12042 			break;
   12043 		}
   12044 	} else {
   12045 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   12046 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   12047 	}
   12048 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   12049 	aprint_normal("\n");
   12050 
   12051 #undef ADD
   12052 
   12053 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   12054 }
   12055 
   12056 /*
   12057  * wm_tbi_mediachange:	[ifmedia interface function]
   12058  *
   12059  *	Set hardware to newly-selected media on a 1000BASE-X device.
   12060  */
   12061 static int
   12062 wm_tbi_mediachange(struct ifnet *ifp)
   12063 {
   12064 	struct wm_softc *sc = ifp->if_softc;
   12065 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12066 	uint32_t status, ctrl;
   12067 	bool signal;
   12068 	int i;
   12069 
   12070 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   12071 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12072 		/* XXX need some work for >= 82571 and < 82575 */
   12073 		if (sc->sc_type < WM_T_82575)
   12074 			return 0;
   12075 	}
   12076 
   12077 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12078 	    || (sc->sc_type >= WM_T_82575))
   12079 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12080 
   12081 	sc->sc_ctrl &= ~CTRL_LRST;
   12082 	sc->sc_txcw = TXCW_ANE;
   12083 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12084 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   12085 	else if (ife->ifm_media & IFM_FDX)
   12086 		sc->sc_txcw |= TXCW_FD;
   12087 	else
   12088 		sc->sc_txcw |= TXCW_HD;
   12089 
   12090 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   12091 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   12092 
   12093 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   12094 		device_xname(sc->sc_dev), sc->sc_txcw));
   12095 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12096 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12097 	CSR_WRITE_FLUSH(sc);
   12098 	delay(1000);
   12099 
   12100 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12101 	signal = wm_tbi_havesignal(sc, ctrl);
   12102 
   12103 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   12104 		signal));
   12105 
   12106 	if (signal) {
   12107 		/* Have signal; wait for the link to come up. */
   12108 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   12109 			delay(10000);
   12110 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   12111 				break;
   12112 		}
   12113 
   12114 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   12115 			device_xname(sc->sc_dev), i));
   12116 
   12117 		status = CSR_READ(sc, WMREG_STATUS);
   12118 		DPRINTF(WM_DEBUG_LINK,
   12119 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   12120 			device_xname(sc->sc_dev), status, STATUS_LU));
   12121 		if (status & STATUS_LU) {
   12122 			/* Link is up. */
   12123 			DPRINTF(WM_DEBUG_LINK,
   12124 			    ("%s: LINK: set media -> link up %s\n",
   12125 				device_xname(sc->sc_dev),
   12126 				(status & STATUS_FD) ? "FDX" : "HDX"));
   12127 
   12128 			/*
   12129 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   12130 			 * so we should update sc->sc_ctrl
   12131 			 */
   12132 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   12133 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   12134 			sc->sc_fcrtl &= ~FCRTL_XONE;
   12135 			if (status & STATUS_FD)
   12136 				sc->sc_tctl |=
   12137 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   12138 			else
   12139 				sc->sc_tctl |=
   12140 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   12141 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   12142 				sc->sc_fcrtl |= FCRTL_XONE;
   12143 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   12144 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   12145 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   12146 			sc->sc_tbi_linkup = 1;
   12147 		} else {
   12148 			if (i == WM_LINKUP_TIMEOUT)
   12149 				wm_check_for_link(sc);
   12150 			/* Link is down. */
   12151 			DPRINTF(WM_DEBUG_LINK,
   12152 			    ("%s: LINK: set media -> link down\n",
   12153 				device_xname(sc->sc_dev)));
   12154 			sc->sc_tbi_linkup = 0;
   12155 		}
   12156 	} else {
   12157 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   12158 			device_xname(sc->sc_dev)));
   12159 		sc->sc_tbi_linkup = 0;
   12160 	}
   12161 
   12162 	wm_tbi_serdes_set_linkled(sc);
   12163 
   12164 	return 0;
   12165 }
   12166 
   12167 /*
   12168  * wm_tbi_mediastatus:	[ifmedia interface function]
   12169  *
   12170  *	Get the current interface media status on a 1000BASE-X device.
   12171  */
   12172 static void
   12173 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12174 {
   12175 	struct wm_softc *sc = ifp->if_softc;
   12176 	uint32_t ctrl, status;
   12177 
   12178 	ifmr->ifm_status = IFM_AVALID;
   12179 	ifmr->ifm_active = IFM_ETHER;
   12180 
   12181 	status = CSR_READ(sc, WMREG_STATUS);
   12182 	if ((status & STATUS_LU) == 0) {
   12183 		ifmr->ifm_active |= IFM_NONE;
   12184 		return;
   12185 	}
   12186 
   12187 	ifmr->ifm_status |= IFM_ACTIVE;
   12188 	/* Only 82545 is LX */
   12189 	if (sc->sc_type == WM_T_82545)
   12190 		ifmr->ifm_active |= IFM_1000_LX;
   12191 	else
   12192 		ifmr->ifm_active |= IFM_1000_SX;
   12193 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   12194 		ifmr->ifm_active |= IFM_FDX;
   12195 	else
   12196 		ifmr->ifm_active |= IFM_HDX;
   12197 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12198 	if (ctrl & CTRL_RFCE)
   12199 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   12200 	if (ctrl & CTRL_TFCE)
   12201 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   12202 }
   12203 
   12204 /* XXX TBI only */
   12205 static int
   12206 wm_check_for_link(struct wm_softc *sc)
   12207 {
   12208 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12209 	uint32_t rxcw;
   12210 	uint32_t ctrl;
   12211 	uint32_t status;
   12212 	bool signal;
   12213 
   12214 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   12215 		device_xname(sc->sc_dev), __func__));
   12216 
   12217 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   12218 		/* XXX need some work for >= 82571 */
   12219 		if (sc->sc_type >= WM_T_82571) {
   12220 			sc->sc_tbi_linkup = 1;
   12221 			return 0;
   12222 		}
   12223 	}
   12224 
   12225 	rxcw = CSR_READ(sc, WMREG_RXCW);
   12226 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12227 	status = CSR_READ(sc, WMREG_STATUS);
   12228 	signal = wm_tbi_havesignal(sc, ctrl);
   12229 
   12230 	DPRINTF(WM_DEBUG_LINK,
   12231 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   12232 		device_xname(sc->sc_dev), __func__, signal,
   12233 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   12234 
   12235 	/*
   12236 	 * SWDPIN   LU RXCW
   12237 	 *	0    0	  0
   12238 	 *	0    0	  1	(should not happen)
   12239 	 *	0    1	  0	(should not happen)
   12240 	 *	0    1	  1	(should not happen)
   12241 	 *	1    0	  0	Disable autonego and force linkup
   12242 	 *	1    0	  1	got /C/ but not linkup yet
   12243 	 *	1    1	  0	(linkup)
   12244 	 *	1    1	  1	If IFM_AUTO, back to autonego
   12245 	 *
   12246 	 */
   12247 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   12248 		DPRINTF(WM_DEBUG_LINK,
   12249 		    ("%s: %s: force linkup and fullduplex\n",
   12250 			device_xname(sc->sc_dev), __func__));
   12251 		sc->sc_tbi_linkup = 0;
   12252 		/* Disable auto-negotiation in the TXCW register */
   12253 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   12254 
   12255 		/*
   12256 		 * Force link-up and also force full-duplex.
   12257 		 *
    12258 		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
   12259 		 * so we should update sc->sc_ctrl
   12260 		 */
   12261 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   12262 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12263 	} else if (((status & STATUS_LU) != 0)
   12264 	    && ((rxcw & RXCW_C) != 0)
   12265 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   12266 		sc->sc_tbi_linkup = 1;
   12267 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   12268 			device_xname(sc->sc_dev),
   12269 			__func__));
   12270 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12271 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   12272 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   12273 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   12274 			device_xname(sc->sc_dev), __func__));
   12275 	} else {
   12276 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   12277 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   12278 			status));
   12279 	}
   12280 
   12281 	return 0;
   12282 }
   12283 
   12284 /*
   12285  * wm_tbi_tick:
   12286  *
   12287  *	Check the link on TBI devices.
   12288  *	This function acts as mii_tick().
   12289  */
   12290 static void
   12291 wm_tbi_tick(struct wm_softc *sc)
   12292 {
   12293 	struct mii_data *mii = &sc->sc_mii;
   12294 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12295 	uint32_t status;
   12296 
   12297 	KASSERT(WM_CORE_LOCKED(sc));
   12298 
   12299 	status = CSR_READ(sc, WMREG_STATUS);
   12300 
   12301 	/* XXX is this needed? */
   12302 	(void)CSR_READ(sc, WMREG_RXCW);
   12303 	(void)CSR_READ(sc, WMREG_CTRL);
   12304 
   12305 	/* set link status */
   12306 	if ((status & STATUS_LU) == 0) {
   12307 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   12308 			device_xname(sc->sc_dev)));
   12309 		sc->sc_tbi_linkup = 0;
   12310 	} else if (sc->sc_tbi_linkup == 0) {
   12311 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   12312 			device_xname(sc->sc_dev),
   12313 			(status & STATUS_FD) ? "FDX" : "HDX"));
   12314 		sc->sc_tbi_linkup = 1;
   12315 		sc->sc_tbi_serdes_ticks = 0;
   12316 	}
   12317 
   12318 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   12319 		goto setled;
   12320 
   12321 	if ((status & STATUS_LU) == 0) {
   12322 		sc->sc_tbi_linkup = 0;
   12323 		/* If the timer expired, retry autonegotiation */
   12324 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12325 		    && (++sc->sc_tbi_serdes_ticks
   12326 			>= sc->sc_tbi_serdes_anegticks)) {
   12327 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12328 				device_xname(sc->sc_dev), __func__));
   12329 			sc->sc_tbi_serdes_ticks = 0;
   12330 			/*
   12331 			 * Reset the link, and let autonegotiation do
   12332 			 * its thing
   12333 			 */
   12334 			sc->sc_ctrl |= CTRL_LRST;
   12335 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12336 			CSR_WRITE_FLUSH(sc);
   12337 			delay(1000);
   12338 			sc->sc_ctrl &= ~CTRL_LRST;
   12339 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12340 			CSR_WRITE_FLUSH(sc);
   12341 			delay(1000);
   12342 			CSR_WRITE(sc, WMREG_TXCW,
   12343 			    sc->sc_txcw & ~TXCW_ANE);
   12344 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   12345 		}
   12346 	}
   12347 
   12348 setled:
   12349 	wm_tbi_serdes_set_linkled(sc);
   12350 }
   12351 
   12352 /* SERDES related */
   12353 static void
   12354 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   12355 {
   12356 	uint32_t reg;
   12357 
   12358 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12359 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12360 		return;
   12361 
   12362 	/* Enable PCS to turn on link */
   12363 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   12364 	reg |= PCS_CFG_PCS_EN;
   12365 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   12366 
   12367 	/* Power up the laser */
   12368 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12369 	reg &= ~CTRL_EXT_SWDPIN(3);
   12370 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12371 
   12372 	/* Flush the write to verify completion */
   12373 	CSR_WRITE_FLUSH(sc);
   12374 	delay(1000);
   12375 }
   12376 
   12377 static int
   12378 wm_serdes_mediachange(struct ifnet *ifp)
   12379 {
   12380 	struct wm_softc *sc = ifp->if_softc;
   12381 	bool pcs_autoneg = true; /* XXX */
   12382 	uint32_t ctrl_ext, pcs_lctl, reg;
   12383 
   12384 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   12385 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   12386 		return 0;
   12387 
   12388 	/* XXX Currently, this function is not called on 8257[12] */
   12389 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   12390 	    || (sc->sc_type >= WM_T_82575))
   12391 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   12392 
   12393 	/* Power on the sfp cage if present */
   12394 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12395 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12396 	ctrl_ext |= CTRL_EXT_I2C_ENA;
   12397 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12398 
   12399 	sc->sc_ctrl |= CTRL_SLU;
   12400 
   12401 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12402 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12403 
   12404 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12405 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12406 	case CTRL_EXT_LINK_MODE_SGMII:
   12407 		/* SGMII mode lets the phy handle forcing speed/duplex */
   12408 		pcs_autoneg = true;
   12409 		/* Autoneg time out should be disabled for SGMII mode */
   12410 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12411 		break;
   12412 	case CTRL_EXT_LINK_MODE_1000KX:
   12413 		pcs_autoneg = false;
   12414 		/* FALLTHROUGH */
   12415 	default:
   12416 		if ((sc->sc_type == WM_T_82575)
   12417 		    || (sc->sc_type == WM_T_82576)) {
   12418 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12419 				pcs_autoneg = false;
   12420 		}
   12421 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12422 		    | CTRL_FRCFDX;
   12423 
   12424 		/* Set speed of 1000/Full if speed/duplex is forced */
   12425 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12426 	}
   12427 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12428 
   12429 	pcs_lctl &= ~(PCS_LCTL_AN_ENABLE | PCS_LCTL_FLV_LINK_UP |
   12430 	    PCS_LCTL_FSD | PCS_LCTL_FORCE_LINK);
   12431 
   12432 	if (pcs_autoneg) {
   12433 		/* Set PCS register for autoneg */
   12434 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12435 
   12436 		/* Disable force flow control for autoneg */
   12437 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12438 
   12439 		/* Configure flow control advertisement for autoneg */
   12440 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12441 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12442 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12443 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12444 	} else
   12445 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12446 
   12447 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12448 
   12449 	return 0;
   12450 }
   12451 
   12452 static void
   12453 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12454 {
   12455 	struct wm_softc *sc = ifp->if_softc;
   12456 	struct mii_data *mii = &sc->sc_mii;
   12457 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12458 	uint32_t pcs_adv, pcs_lpab, reg;
   12459 
   12460 	ifmr->ifm_status = IFM_AVALID;
   12461 	ifmr->ifm_active = IFM_ETHER;
   12462 
   12463 	/* Check PCS */
   12464 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12465 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12466 		ifmr->ifm_active |= IFM_NONE;
   12467 		sc->sc_tbi_linkup = 0;
   12468 		goto setled;
   12469 	}
   12470 
   12471 	sc->sc_tbi_linkup = 1;
   12472 	ifmr->ifm_status |= IFM_ACTIVE;
   12473 	if (sc->sc_type == WM_T_I354) {
   12474 		uint32_t status;
   12475 
   12476 		status = CSR_READ(sc, WMREG_STATUS);
   12477 		if (((status & STATUS_2P5_SKU) != 0)
   12478 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12479 			ifmr->ifm_active |= IFM_2500_KX;
   12480 		} else
   12481 			ifmr->ifm_active |= IFM_1000_KX;
   12482 	} else {
   12483 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12484 		case PCS_LSTS_SPEED_10:
   12485 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12486 			break;
   12487 		case PCS_LSTS_SPEED_100:
   12488 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12489 			break;
   12490 		case PCS_LSTS_SPEED_1000:
   12491 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12492 			break;
   12493 		default:
   12494 			device_printf(sc->sc_dev, "Unknown speed\n");
   12495 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12496 			break;
   12497 		}
   12498 	}
   12499 	if ((reg & PCS_LSTS_FDX) != 0)
   12500 		ifmr->ifm_active |= IFM_FDX;
   12501 	else
   12502 		ifmr->ifm_active |= IFM_HDX;
   12503 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12504 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12505 		/* Check flow */
   12506 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12507 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12508 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12509 			goto setled;
   12510 		}
   12511 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12512 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12513 		DPRINTF(WM_DEBUG_LINK,
   12514 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
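          		/*
          		 * Resolve pause as in IEEE 802.3 Annex 28B: symmetric
          		 * pause if both sides advertise SYM; otherwise TX-only
          		 * or RX-only pause when exactly one side advertises SYM
          		 * and both advertise ASYM.
          		 */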
   12515 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12516 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12517 			mii->mii_media_active |= IFM_FLOW
   12518 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12519 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12520 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12521 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12522 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12523 			mii->mii_media_active |= IFM_FLOW
   12524 			    | IFM_ETH_TXPAUSE;
   12525 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12526 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12527 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12528 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12529 			mii->mii_media_active |= IFM_FLOW
   12530 			    | IFM_ETH_RXPAUSE;
   12531 		}
   12532 	}
   12533 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12534 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12535 setled:
   12536 	wm_tbi_serdes_set_linkled(sc);
   12537 }
   12538 
   12539 /*
   12540  * wm_serdes_tick:
   12541  *
   12542  *	Check the link on serdes devices.
   12543  */
   12544 static void
   12545 wm_serdes_tick(struct wm_softc *sc)
   12546 {
   12547 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12548 	struct mii_data *mii = &sc->sc_mii;
   12549 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12550 	uint32_t reg;
   12551 
   12552 	KASSERT(WM_CORE_LOCKED(sc));
   12553 
   12554 	mii->mii_media_status = IFM_AVALID;
   12555 	mii->mii_media_active = IFM_ETHER;
   12556 
   12557 	/* Check PCS */
   12558 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12559 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12560 		mii->mii_media_status |= IFM_ACTIVE;
   12561 		sc->sc_tbi_linkup = 1;
   12562 		sc->sc_tbi_serdes_ticks = 0;
   12563 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12564 		if ((reg & PCS_LSTS_FDX) != 0)
   12565 			mii->mii_media_active |= IFM_FDX;
   12566 		else
   12567 			mii->mii_media_active |= IFM_HDX;
   12568 	} else {
    12569 		mii->mii_media_active |= IFM_NONE;
   12570 		sc->sc_tbi_linkup = 0;
   12571 		/* If the timer expired, retry autonegotiation */
   12572 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12573 		    && (++sc->sc_tbi_serdes_ticks
   12574 			>= sc->sc_tbi_serdes_anegticks)) {
   12575 			DPRINTF(WM_DEBUG_LINK, ("%s: %s: EXPIRE\n",
   12576 				device_xname(sc->sc_dev), __func__));
   12577 			sc->sc_tbi_serdes_ticks = 0;
   12578 			/* XXX */
   12579 			wm_serdes_mediachange(ifp);
   12580 		}
   12581 	}
   12582 
   12583 	wm_tbi_serdes_set_linkled(sc);
   12584 }
   12585 
   12586 /* SFP related */
   12587 
   12588 static int
   12589 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12590 {
   12591 	uint32_t i2ccmd;
   12592 	int i;
   12593 
   12594 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12595 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12596 
   12597 	/* Poll the ready bit */
   12598 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12599 		delay(50);
   12600 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12601 		if (i2ccmd & I2CCMD_READY)
   12602 			break;
   12603 	}
   12604 	if ((i2ccmd & I2CCMD_READY) == 0)
   12605 		return -1;
   12606 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12607 		return -1;
   12608 
   12609 	*data = i2ccmd & 0x00ff;
   12610 
   12611 	return 0;
   12612 }
   12613 
   12614 static uint32_t
   12615 wm_sfp_get_media_type(struct wm_softc *sc)
   12616 {
   12617 	uint32_t ctrl_ext;
   12618 	uint8_t val = 0;
   12619 	int timeout = 3;
   12620 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12621 	int rv = -1;
   12622 
   12623 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12624 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12625 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12626 	CSR_WRITE_FLUSH(sc);
   12627 
   12628 	/* Read SFP module data */
   12629 	while (timeout) {
   12630 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12631 		if (rv == 0)
   12632 			break;
   12633 		delay(100*1000); /* XXX too big */
   12634 		timeout--;
   12635 	}
   12636 	if (rv != 0)
   12637 		goto out;
   12638 
   12639 	switch (val) {
   12640 	case SFF_SFP_ID_SFF:
   12641 		aprint_normal_dev(sc->sc_dev,
   12642 		    "Module/Connector soldered to board\n");
   12643 		break;
   12644 	case SFF_SFP_ID_SFP:
   12645 		sc->sc_flags |= WM_F_SFP;
   12646 		break;
   12647 	case SFF_SFP_ID_UNKNOWN:
   12648 		goto out;
   12649 	default:
   12650 		break;
   12651 	}
   12652 
   12653 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12654 	if (rv != 0)
   12655 		goto out;
   12656 
   12657 	sc->sc_sfptype = val;
   12658 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12659 		mediatype = WM_MEDIATYPE_SERDES;
   12660 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12661 		sc->sc_flags |= WM_F_SGMII;
   12662 		mediatype = WM_MEDIATYPE_COPPER;
   12663 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12664 		sc->sc_flags |= WM_F_SGMII;
   12665 		mediatype = WM_MEDIATYPE_SERDES;
   12666 	} else {
   12667 		device_printf(sc->sc_dev, "%s: unknown media type? (0x%hhx)\n",
   12668 		    __func__, sc->sc_sfptype);
   12669 		sc->sc_sfptype = 0; /* XXX unknown */
   12670 	}
   12671 
   12672 out:
   12673 	/* Restore I2C interface setting */
   12674 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12675 
   12676 	return mediatype;
   12677 }
   12678 
   12679 /*
   12680  * NVM related.
   12681  * Microwire, SPI (w/wo EERD) and Flash.
   12682  */
   12683 
   12684 /* Both spi and uwire */
   12685 
   12686 /*
   12687  * wm_eeprom_sendbits:
   12688  *
   12689  *	Send a series of bits to the EEPROM.
   12690  */
   12691 static void
   12692 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12693 {
   12694 	uint32_t reg;
   12695 	int x;
   12696 
   12697 	reg = CSR_READ(sc, WMREG_EECD);
   12698 
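          	/* Clock bits out MSB first: present each bit on DI, pulse SK. */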
   12699 	for (x = nbits; x > 0; x--) {
   12700 		if (bits & (1U << (x - 1)))
   12701 			reg |= EECD_DI;
   12702 		else
   12703 			reg &= ~EECD_DI;
   12704 		CSR_WRITE(sc, WMREG_EECD, reg);
   12705 		CSR_WRITE_FLUSH(sc);
   12706 		delay(2);
   12707 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12708 		CSR_WRITE_FLUSH(sc);
   12709 		delay(2);
   12710 		CSR_WRITE(sc, WMREG_EECD, reg);
   12711 		CSR_WRITE_FLUSH(sc);
   12712 		delay(2);
   12713 	}
   12714 }
   12715 
   12716 /*
   12717  * wm_eeprom_recvbits:
   12718  *
   12719  *	Receive a series of bits from the EEPROM.
   12720  */
   12721 static void
   12722 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12723 {
   12724 	uint32_t reg, val;
   12725 	int x;
   12726 
   12727 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12728 
   12729 	val = 0;
   12730 	for (x = nbits; x > 0; x--) {
   12731 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12732 		CSR_WRITE_FLUSH(sc);
   12733 		delay(2);
   12734 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12735 			val |= (1U << (x - 1));
   12736 		CSR_WRITE(sc, WMREG_EECD, reg);
   12737 		CSR_WRITE_FLUSH(sc);
   12738 		delay(2);
   12739 	}
   12740 	*valp = val;
   12741 }
   12742 
   12743 /* Microwire */
   12744 
   12745 /*
   12746  * wm_nvm_read_uwire:
   12747  *
   12748  *	Read a word from the EEPROM using the MicroWire protocol.
   12749  */
   12750 static int
   12751 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12752 {
   12753 	uint32_t reg, val;
   12754 	int i;
   12755 
   12756 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12757 		device_xname(sc->sc_dev), __func__));
   12758 
   12759 	if (sc->nvm.acquire(sc) != 0)
   12760 		return -1;
   12761 
   12762 	for (i = 0; i < wordcnt; i++) {
   12763 		/* Clear SK and DI. */
   12764 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12765 		CSR_WRITE(sc, WMREG_EECD, reg);
   12766 
   12767 		/*
   12768 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12769 		 * and Xen.
   12770 		 *
   12771 		 * We use this workaround only for 82540 because qemu's
    12772 		 * We use this workaround only for the 82540 because qemu's
    12773 		 * e1000 acts as an 82540.
   12774 		if (sc->sc_type == WM_T_82540) {
   12775 			reg |= EECD_SK;
   12776 			CSR_WRITE(sc, WMREG_EECD, reg);
   12777 			reg &= ~EECD_SK;
   12778 			CSR_WRITE(sc, WMREG_EECD, reg);
   12779 			CSR_WRITE_FLUSH(sc);
   12780 			delay(2);
   12781 		}
   12782 		/* XXX: end of workaround */
   12783 
   12784 		/* Set CHIP SELECT. */
   12785 		reg |= EECD_CS;
   12786 		CSR_WRITE(sc, WMREG_EECD, reg);
   12787 		CSR_WRITE_FLUSH(sc);
   12788 		delay(2);
   12789 
   12790 		/* Shift in the READ command. */
   12791 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12792 
   12793 		/* Shift in address. */
   12794 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12795 
   12796 		/* Shift out the data. */
   12797 		wm_eeprom_recvbits(sc, &val, 16);
   12798 		data[i] = val & 0xffff;
   12799 
   12800 		/* Clear CHIP SELECT. */
   12801 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12802 		CSR_WRITE(sc, WMREG_EECD, reg);
   12803 		CSR_WRITE_FLUSH(sc);
   12804 		delay(2);
   12805 	}
   12806 
   12807 	sc->nvm.release(sc);
   12808 	return 0;
   12809 }
   12810 
   12811 /* SPI */
   12812 
   12813 /*
   12814  * Set SPI and FLASH related information from the EECD register.
   12815  * For 82541 and 82547, the word size is taken from EEPROM.
   12816  */
   12817 static int
   12818 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12819 {
   12820 	int size;
   12821 	uint32_t reg;
	uint16_t data = 0;
   12823 
   12824 	reg = CSR_READ(sc, WMREG_EECD);
   12825 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12826 
   12827 	/* Read the size of NVM from EECD by default */
   12828 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12829 	switch (sc->sc_type) {
   12830 	case WM_T_82541:
   12831 	case WM_T_82541_2:
   12832 	case WM_T_82547:
   12833 	case WM_T_82547_2:
   12834 		/* Set dummy value to access EEPROM */
   12835 		sc->sc_nvm_wordsize = 64;
   12836 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12837 			aprint_error_dev(sc->sc_dev,
   12838 			    "%s: failed to read EEPROM size\n", __func__);
   12839 		}
   12840 		reg = data;
   12841 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12842 		if (size == 0)
   12843 			size = 6; /* 64 word size */
   12844 		else
   12845 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12846 		break;
   12847 	case WM_T_80003:
   12848 	case WM_T_82571:
   12849 	case WM_T_82572:
   12850 	case WM_T_82573: /* SPI case */
   12851 	case WM_T_82574: /* SPI case */
   12852 	case WM_T_82583: /* SPI case */
   12853 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12854 		if (size > 14)
   12855 			size = 14;
   12856 		break;
   12857 	case WM_T_82575:
   12858 	case WM_T_82576:
   12859 	case WM_T_82580:
   12860 	case WM_T_I350:
   12861 	case WM_T_I354:
   12862 	case WM_T_I210:
   12863 	case WM_T_I211:
   12864 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12865 		if (size > 15)
   12866 			size = 15;
   12867 		break;
   12868 	default:
   12869 		aprint_error_dev(sc->sc_dev,
   12870 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
		return -1;
   12873 	}
   12874 
   12875 	sc->sc_nvm_wordsize = 1 << size;
   12876 
   12877 	return 0;
   12878 }
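
/*
 * A worked example of the sizing above (a sketch; it assumes
 * NVM_WORD_SIZE_BASE_SHIFT is 6, as on e1000-class hardware): an
 * EECD_EE_SIZE_EX field value of 2 on an 82571 gives size = 2 + 6 = 8,
 * so sc_nvm_wordsize = 1 << 8 = 256 words.
 */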
   12879 
   12880 /*
   12881  * wm_nvm_ready_spi:
   12882  *
   12883  *	Wait for a SPI EEPROM to be ready for commands.
   12884  */
   12885 static int
   12886 wm_nvm_ready_spi(struct wm_softc *sc)
   12887 {
   12888 	uint32_t val;
   12889 	int usec;
   12890 
   12891 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12892 		device_xname(sc->sc_dev), __func__));
   12893 
   12894 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12895 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12896 		wm_eeprom_recvbits(sc, &val, 8);
   12897 		if ((val & SPI_SR_RDY) == 0)
   12898 			break;
   12899 	}
   12900 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev,
		    "EEPROM failed to become ready\n");
   12902 		return -1;
   12903 	}
   12904 	return 0;
   12905 }
   12906 
   12907 /*
   12908  * wm_nvm_read_spi:
   12909  *
 *	Read word(s) from the EEPROM using the SPI protocol.
   12911  */
   12912 static int
   12913 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12914 {
   12915 	uint32_t reg, val;
   12916 	int i;
   12917 	uint8_t opc;
   12918 	int rv = 0;
   12919 
   12920 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12921 		device_xname(sc->sc_dev), __func__));
   12922 
   12923 	if (sc->nvm.acquire(sc) != 0)
   12924 		return -1;
   12925 
   12926 	/* Clear SK and CS. */
   12927 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12928 	CSR_WRITE(sc, WMREG_EECD, reg);
   12929 	CSR_WRITE_FLUSH(sc);
   12930 	delay(2);
   12931 
   12932 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12933 		goto out;
   12934 
   12935 	/* Toggle CS to flush commands. */
   12936 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12937 	CSR_WRITE_FLUSH(sc);
   12938 	delay(2);
   12939 	CSR_WRITE(sc, WMREG_EECD, reg);
   12940 	CSR_WRITE_FLUSH(sc);
   12941 	delay(2);
   12942 
   12943 	opc = SPI_OPC_READ;
   12944 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12945 		opc |= SPI_OPC_A8;
   12946 
   12947 	wm_eeprom_sendbits(sc, opc, 8);
   12948 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12949 
   12950 	for (i = 0; i < wordcnt; i++) {
   12951 		wm_eeprom_recvbits(sc, &val, 16);
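		/* The two bytes of each word arrive in reverse order. */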
   12952 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12953 	}
   12954 
   12955 	/* Raise CS and clear SK. */
   12956 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12957 	CSR_WRITE(sc, WMREG_EECD, reg);
   12958 	CSR_WRITE_FLUSH(sc);
   12959 	delay(2);
   12960 
   12961 out:
   12962 	sc->nvm.release(sc);
   12963 	return rv;
   12964 }
   12965 
/* EERD (EEPROM Read register) */
   12967 
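/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll for completion of an EERD/EEWR access.  "rw" is the register
 *	offset to poll (WMREG_EERD in this file).  Return 0 once the DONE
 *	bit is seen, or -1 on timeout.
 */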
   12968 static int
   12969 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12970 {
   12971 	uint32_t attempts = 100000;
   12972 	uint32_t i, reg = 0;
   12973 	int32_t done = -1;
   12974 
   12975 	for (i = 0; i < attempts; i++) {
   12976 		reg = CSR_READ(sc, rw);
   12977 
   12978 		if (reg & EERD_DONE) {
   12979 			done = 0;
   12980 			break;
   12981 		}
   12982 		delay(5);
   12983 	}
   12984 
   12985 	return done;
   12986 }
   12987 
   12988 static int
   12989 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12990 {
   12991 	int i, eerd = 0;
   12992 	int rv = 0;
   12993 
   12994 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12995 		device_xname(sc->sc_dev), __func__));
   12996 
   12997 	if (sc->nvm.acquire(sc) != 0)
   12998 		return -1;
   12999 
   13000 	for (i = 0; i < wordcnt; i++) {
   13001 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   13002 		CSR_WRITE(sc, WMREG_EERD, eerd);
   13003 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   13004 		if (rv != 0) {
   13005 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   13006 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   13007 			break;
   13008 		}
   13009 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   13010 	}
   13011 
   13012 	sc->nvm.release(sc);
   13013 	return rv;
   13014 }
   13015 
   13016 /* Flash */
   13017 
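/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Work out which of the two NVM banks holds a valid signature and
 *	return its number in *bank.  Return 0 on success and non-zero
 *	when neither bank looks valid or the flash read fails.
 */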
   13018 static int
   13019 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   13020 {
   13021 	uint32_t eecd;
   13022 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   13023 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   13024 	uint32_t nvm_dword = 0;
   13025 	uint8_t sig_byte = 0;
   13026 	int rv;
   13027 
   13028 	switch (sc->sc_type) {
   13029 	case WM_T_PCH_SPT:
   13030 	case WM_T_PCH_CNP:
   13031 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   13032 		act_offset = ICH_NVM_SIG_WORD * 2;
   13033 
   13034 		/* Set bank to 0 in case flash read fails. */
   13035 		*bank = 0;
   13036 
   13037 		/* Check bank 0 */
   13038 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   13039 		if (rv != 0)
   13040 			return rv;
   13041 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13042 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13043 			*bank = 0;
   13044 			return 0;
   13045 		}
   13046 
   13047 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   13051 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13052 			*bank = 1;
   13053 			return 0;
   13054 		}
   13055 		aprint_error_dev(sc->sc_dev,
   13056 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   13057 		return -1;
   13058 	case WM_T_ICH8:
   13059 	case WM_T_ICH9:
   13060 		eecd = CSR_READ(sc, WMREG_EECD);
   13061 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   13062 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   13063 			return 0;
   13064 		}
   13065 		/* FALLTHROUGH */
   13066 	default:
   13067 		/* Default to 0 */
   13068 		*bank = 0;
   13069 
   13070 		/* Check bank 0 */
   13071 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   13072 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13073 			*bank = 0;
   13074 			return 0;
   13075 		}
   13076 
   13077 		/* Check bank 1 */
   13078 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   13079 		    &sig_byte);
   13080 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   13081 			*bank = 1;
   13082 			return 0;
   13083 		}
   13084 	}
   13085 
   13086 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   13087 		device_xname(sc->sc_dev)));
   13088 	return -1;
   13089 }
   13090 
   13091 /******************************************************************************
   13092  * This function does initial flash setup so that a new read/write/erase cycle
   13093  * can be started.
   13094  *
   13095  * sc - The pointer to the hw structure
   13096  ****************************************************************************/
   13097 static int32_t
   13098 wm_ich8_cycle_init(struct wm_softc *sc)
   13099 {
   13100 	uint16_t hsfsts;
   13101 	int32_t error = 1;
   13102 	int32_t i     = 0;
   13103 
   13104 	if (sc->sc_type >= WM_T_PCH_SPT)
   13105 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   13106 	else
   13107 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13108 
	/* Check the Flash Descriptor Valid bit in Hw status */
   13110 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   13111 		return error;
   13112 
	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   13115 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   13116 
   13117 	if (sc->sc_type >= WM_T_PCH_SPT)
   13118 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   13119 	else
   13120 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13121 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which could then be used to tell whether a
	 * cycle is in progress or has completed.  We should also have
	 * some software semaphore mechanism guarding FDONE or the
	 * cycle-in-progress bit so that two threads' accesses to those
	 * bits are serialized, or some other way to keep two threads from
	 * starting a cycle at the same time.
	 */
   13132 
   13133 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13134 		/*
   13135 		 * There is no cycle running at present, so we can start a
   13136 		 * cycle
   13137 		 */
   13138 
   13139 		/* Begin by setting Flash Cycle Done. */
   13140 		hsfsts |= HSFSTS_DONE;
   13141 		if (sc->sc_type >= WM_T_PCH_SPT)
   13142 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13143 			    hsfsts & 0xffffUL);
   13144 		else
   13145 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   13146 		error = 0;
   13147 	} else {
   13148 		/*
		 * Otherwise poll for some time so the current cycle has a
   13150 		 * chance to end before giving up.
   13151 		 */
   13152 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   13153 			if (sc->sc_type >= WM_T_PCH_SPT)
   13154 				hsfsts = ICH8_FLASH_READ32(sc,
   13155 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13156 			else
   13157 				hsfsts = ICH8_FLASH_READ16(sc,
   13158 				    ICH_FLASH_HSFSTS);
   13159 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   13160 				error = 0;
   13161 				break;
   13162 			}
   13163 			delay(1);
   13164 		}
   13165 		if (error == 0) {
			/*
			 * The previous cycle ended before our timeout, so
			 * now set the Flash Cycle Done bit.
			 */
   13170 			hsfsts |= HSFSTS_DONE;
   13171 			if (sc->sc_type >= WM_T_PCH_SPT)
   13172 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13173 				    hsfsts & 0xffffUL);
   13174 			else
   13175 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   13176 				    hsfsts);
   13177 		}
   13178 	}
   13179 	return error;
   13180 }
   13181 
   13182 /******************************************************************************
   13183  * This function starts a flash cycle and waits for its completion
   13184  *
   13185  * sc - The pointer to the hw structure
   13186  ****************************************************************************/
   13187 static int32_t
   13188 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   13189 {
   13190 	uint16_t hsflctl;
   13191 	uint16_t hsfsts;
   13192 	int32_t error = 1;
   13193 	uint32_t i = 0;
   13194 
   13195 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   13196 	if (sc->sc_type >= WM_T_PCH_SPT)
   13197 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   13198 	else
   13199 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   13200 	hsflctl |= HSFCTL_GO;
   13201 	if (sc->sc_type >= WM_T_PCH_SPT)
   13202 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13203 		    (uint32_t)hsflctl << 16);
   13204 	else
   13205 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13206 
   13207 	/* Wait till FDONE bit is set to 1 */
   13208 	do {
   13209 		if (sc->sc_type >= WM_T_PCH_SPT)
   13210 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13211 			    & 0xffffUL;
   13212 		else
   13213 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   13214 		if (hsfsts & HSFSTS_DONE)
   13215 			break;
   13216 		delay(1);
   13217 		i++;
   13218 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   13220 		error = 0;
   13221 
   13222 	return error;
   13223 }
   13224 
   13225 /******************************************************************************
   13226  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   13227  *
   13228  * sc - The pointer to the hw structure
   13229  * index - The index of the byte or word to read.
 * size - Size of data to read: 1=byte, 2=word, 4=dword
   13231  * data - Pointer to the word to store the value read.
   13232  *****************************************************************************/
   13233 static int32_t
   13234 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   13235     uint32_t size, uint32_t *data)
   13236 {
   13237 	uint16_t hsfsts;
   13238 	uint16_t hsflctl;
   13239 	uint32_t flash_linear_address;
   13240 	uint32_t flash_data = 0;
   13241 	int32_t error = 1;
   13242 	int32_t count = 0;
   13243 
	if (size < 1 || size > 4 || data == NULL ||
   13245 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   13246 		return error;
   13247 
   13248 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   13249 	    sc->sc_ich8_flash_base;
   13250 
   13251 	do {
   13252 		delay(1);
   13253 		/* Steps */
   13254 		error = wm_ich8_cycle_init(sc);
   13255 		if (error)
   13256 			break;
   13257 
   13258 		if (sc->sc_type >= WM_T_PCH_SPT)
   13259 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   13260 			    >> 16;
   13261 		else
   13262 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds the transfer size minus 1. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
		    & HSFCTL_BCOUNT_MASK;
   13266 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   13267 		if (sc->sc_type >= WM_T_PCH_SPT) {
   13268 			/*
			 * In SPT, this register is in LAN memory space, not
			 * flash.  Therefore, only 32 bit access is supported.
   13271 			 */
   13272 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   13273 			    (uint32_t)hsflctl << 16);
   13274 		} else
   13275 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   13276 
   13277 		/*
   13278 		 * Write the last 24 bits of index into Flash Linear address
   13279 		 * field in Flash Address
   13280 		 */
		/* TODO: maybe check the index against the size of the flash */
   13282 
   13283 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   13284 
   13285 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   13286 
		/*
		 * If FCERR is set to 1, clear it and retry the whole
		 * sequence a few more times; otherwise read in (shift in)
		 * the Flash Data0, least significant byte first.
		 */
   13293 		if (error == 0) {
   13294 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   13295 			if (size == 1)
   13296 				*data = (uint8_t)(flash_data & 0x000000FF);
   13297 			else if (size == 2)
   13298 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   13299 			else if (size == 4)
   13300 				*data = (uint32_t)flash_data;
   13301 			break;
   13302 		} else {
   13303 			/*
   13304 			 * If we've gotten here, then things are probably
   13305 			 * completely hosed, but if the error condition is
   13306 			 * detected, it won't hurt to give it another try...
   13307 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   13308 			 */
   13309 			if (sc->sc_type >= WM_T_PCH_SPT)
   13310 				hsfsts = ICH8_FLASH_READ32(sc,
   13311 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   13312 			else
   13313 				hsfsts = ICH8_FLASH_READ16(sc,
   13314 				    ICH_FLASH_HSFSTS);
   13315 
   13316 			if (hsfsts & HSFSTS_ERR) {
   13317 				/* Repeat for some time before giving up. */
   13318 				continue;
   13319 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   13320 				break;
   13321 		}
   13322 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   13323 
   13324 	return error;
   13325 }
   13326 
   13327 /******************************************************************************
   13328  * Reads a single byte from the NVM using the ICH8 flash access registers.
   13329  *
   13330  * sc - pointer to wm_hw structure
   13331  * index - The index of the byte to read.
   13332  * data - Pointer to a byte to store the value read.
   13333  *****************************************************************************/
   13334 static int32_t
   13335 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   13336 {
   13337 	int32_t status;
   13338 	uint32_t word = 0;
   13339 
   13340 	status = wm_read_ich8_data(sc, index, 1, &word);
   13341 	if (status == 0)
   13342 		*data = (uint8_t)word;
   13343 	else
   13344 		*data = 0;
   13345 
   13346 	return status;
   13347 }
   13348 
   13349 /******************************************************************************
   13350  * Reads a word from the NVM using the ICH8 flash access registers.
   13351  *
   13352  * sc - pointer to wm_hw structure
   13353  * index - The starting byte index of the word to read.
   13354  * data - Pointer to a word to store the value read.
   13355  *****************************************************************************/
   13356 static int32_t
   13357 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   13358 {
   13359 	int32_t status;
   13360 	uint32_t word = 0;
   13361 
   13362 	status = wm_read_ich8_data(sc, index, 2, &word);
   13363 	if (status == 0)
   13364 		*data = (uint16_t)word;
   13365 	else
   13366 		*data = 0;
   13367 
   13368 	return status;
   13369 }
   13370 
   13371 /******************************************************************************
   13372  * Reads a dword from the NVM using the ICH8 flash access registers.
   13373  *
   13374  * sc - pointer to wm_hw structure
   13375  * index - The starting byte index of the word to read.
   13376  * data - Pointer to a word to store the value read.
   13377  *****************************************************************************/
   13378 static int32_t
   13379 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   13380 {
   13381 	int32_t status;
   13382 
   13383 	status = wm_read_ich8_data(sc, index, 4, data);
   13384 	return status;
   13385 }
   13386 
   13387 /******************************************************************************
   13388  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   13389  * register.
   13390  *
   13391  * sc - Struct containing variables accessed by shared code
   13392  * offset - offset of word in the EEPROM to read
   13393  * data - word read from the EEPROM
   13394  * words - number of words to read
   13395  *****************************************************************************/
   13396 static int
   13397 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13398 {
   13399 	int32_t	 rv = 0;
   13400 	uint32_t flash_bank = 0;
   13401 	uint32_t act_offset = 0;
   13402 	uint32_t bank_offset = 0;
   13403 	uint16_t word = 0;
   13404 	uint16_t i = 0;
   13405 
   13406 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13407 		device_xname(sc->sc_dev), __func__));
   13408 
   13409 	if (sc->nvm.acquire(sc) != 0)
   13410 		return -1;
   13411 
   13412 	/*
   13413 	 * We need to know which is the valid flash bank.  In the event
   13414 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13415 	 * managing flash_bank. So it cannot be trusted and needs
   13416 	 * to be updated with each read.
   13417 	 */
   13418 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13419 	if (rv) {
   13420 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13421 			device_xname(sc->sc_dev)));
   13422 		flash_bank = 0;
   13423 	}
   13424 
   13425 	/*
   13426 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13427 	 * size
   13428 	 */
   13429 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13430 
   13431 	for (i = 0; i < words; i++) {
   13432 		/* The NVM part needs a byte offset, hence * 2 */
   13433 		act_offset = bank_offset + ((offset + i) * 2);
   13434 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13435 		if (rv) {
   13436 			aprint_error_dev(sc->sc_dev,
   13437 			    "%s: failed to read NVM\n", __func__);
   13438 			break;
   13439 		}
   13440 		data[i] = word;
   13441 	}
   13442 
   13443 	sc->nvm.release(sc);
   13444 	return rv;
   13445 }
   13446 
   13447 /******************************************************************************
   13448  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13449  * register.
   13450  *
   13451  * sc - Struct containing variables accessed by shared code
   13452  * offset - offset of word in the EEPROM to read
   13453  * data - word read from the EEPROM
   13454  * words - number of words to read
   13455  *****************************************************************************/
   13456 static int
   13457 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13458 {
   13459 	int32_t	 rv = 0;
   13460 	uint32_t flash_bank = 0;
   13461 	uint32_t act_offset = 0;
   13462 	uint32_t bank_offset = 0;
   13463 	uint32_t dword = 0;
   13464 	uint16_t i = 0;
   13465 
   13466 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13467 		device_xname(sc->sc_dev), __func__));
   13468 
   13469 	if (sc->nvm.acquire(sc) != 0)
   13470 		return -1;
   13471 
   13472 	/*
   13473 	 * We need to know which is the valid flash bank.  In the event
   13474 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13475 	 * managing flash_bank. So it cannot be trusted and needs
   13476 	 * to be updated with each read.
   13477 	 */
   13478 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13479 	if (rv) {
   13480 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13481 			device_xname(sc->sc_dev)));
   13482 		flash_bank = 0;
   13483 	}
   13484 
   13485 	/*
   13486 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13487 	 * size
   13488 	 */
   13489 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13490 
   13491 	for (i = 0; i < words; i++) {
   13492 		/* The NVM part needs a byte offset, hence * 2 */
   13493 		act_offset = bank_offset + ((offset + i) * 2);
   13494 		/* but we must read dword aligned, so mask ... */
   13495 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13496 		if (rv) {
   13497 			aprint_error_dev(sc->sc_dev,
   13498 			    "%s: failed to read NVM\n", __func__);
   13499 			break;
   13500 		}
   13501 		/* ... and pick out low or high word */
   13502 		if ((act_offset & 0x2) == 0)
   13503 			data[i] = (uint16_t)(dword & 0xFFFF);
   13504 		else
   13505 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13506 	}
   13507 
   13508 	sc->nvm.release(sc);
   13509 	return rv;
   13510 }
   13511 
   13512 /* iNVM */
   13513 
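/*
 * wm_nvm_read_word_invm:
 *
 *	Scan the iNVM (OTP) records for a word-autoload entry whose word
 *	address matches "address" and return its data.  CSR-autoload and
 *	RSA-key records are skipped by stepping over their payload dwords.
 */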
   13514 static int
   13515 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13516 {
	int32_t	 rv = -1;	/* Fail unless the word is found below */
   13518 	uint32_t invm_dword;
   13519 	uint16_t i;
   13520 	uint8_t record_type, word_address;
   13521 
   13522 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13523 		device_xname(sc->sc_dev), __func__));
   13524 
   13525 	for (i = 0; i < INVM_SIZE; i++) {
   13526 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13527 		/* Get record type */
   13528 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13529 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13530 			break;
   13531 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13532 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13533 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13534 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13535 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13536 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13537 			if (word_address == address) {
   13538 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13539 				rv = 0;
   13540 				break;
   13541 			}
   13542 		}
   13543 	}
   13544 
   13545 	return rv;
   13546 }
   13547 
   13548 static int
   13549 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13550 {
   13551 	int rv = 0;
   13552 	int i;
   13553 
   13554 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13555 		device_xname(sc->sc_dev), __func__));
   13556 
   13557 	if (sc->nvm.acquire(sc) != 0)
   13558 		return -1;
   13559 
   13560 	for (i = 0; i < words; i++) {
   13561 		switch (offset + i) {
   13562 		case NVM_OFF_MACADDR:
   13563 		case NVM_OFF_MACADDR1:
   13564 		case NVM_OFF_MACADDR2:
   13565 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13566 			if (rv != 0) {
   13567 				data[i] = 0xffff;
   13568 				rv = -1;
   13569 			}
   13570 			break;
   13571 		case NVM_OFF_CFG2:
   13572 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13573 			if (rv != 0) {
   13574 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13575 				rv = 0;
   13576 			}
   13577 			break;
   13578 		case NVM_OFF_CFG4:
   13579 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13580 			if (rv != 0) {
   13581 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13582 				rv = 0;
   13583 			}
   13584 			break;
   13585 		case NVM_OFF_LED_1_CFG:
   13586 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13587 			if (rv != 0) {
   13588 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13589 				rv = 0;
   13590 			}
   13591 			break;
   13592 		case NVM_OFF_LED_0_2_CFG:
   13593 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13594 			if (rv != 0) {
   13595 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13596 				rv = 0;
   13597 			}
   13598 			break;
   13599 		case NVM_OFF_ID_LED_SETTINGS:
   13600 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13601 			if (rv != 0) {
   13602 				*data = ID_LED_RESERVED_FFFF;
   13603 				rv = 0;
   13604 			}
   13605 			break;
   13606 		default:
   13607 			DPRINTF(WM_DEBUG_NVM,
   13608 			    ("NVM word 0x%02x is not mapped.\n", offset));
   13609 			*data = NVM_RESERVED_WORD;
   13610 			break;
   13611 		}
   13612 	}
   13613 
   13614 	sc->nvm.release(sc);
   13615 	return rv;
   13616 }
   13617 
/* Locking, NVM type detection, checksum validation, version and read */
   13619 
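/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 0 when an 82573/82574/82583 is strapped to use Flash,
 *	1 when the NVM is the on-board EEPROM (always 1 on other chips).
 */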
   13620 static int
   13621 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13622 {
   13623 	uint32_t eecd = 0;
   13624 
   13625 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13626 	    || sc->sc_type == WM_T_82583) {
   13627 		eecd = CSR_READ(sc, WMREG_EECD);
   13628 
   13629 		/* Isolate bits 15 & 16 */
   13630 		eecd = ((eecd >> 15) & 0x03);
   13631 
   13632 		/* If both bits are set, device is Flash type */
   13633 		if (eecd == 0x03)
   13634 			return 0;
   13635 	}
   13636 	return 1;
   13637 }
   13638 
   13639 static int
   13640 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13641 {
   13642 	uint32_t eec;
   13643 
   13644 	eec = CSR_READ(sc, WMREG_EEC);
   13645 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13646 		return 1;
   13647 
   13648 	return 0;
   13649 }
   13650 
   13651 /*
   13652  * wm_nvm_validate_checksum
   13653  *
 * The 16-bit sum of the first 64 (16 bit) words must equal NVM_CHECKSUM.
   13655  */
   13656 static int
   13657 wm_nvm_validate_checksum(struct wm_softc *sc)
   13658 {
   13659 	uint16_t checksum;
   13660 	uint16_t eeprom_data;
   13661 #ifdef WM_DEBUG
   13662 	uint16_t csum_wordaddr, valid_checksum;
   13663 #endif
   13664 	int i;
   13665 
   13666 	checksum = 0;
   13667 
   13668 	/* Don't check for I211 */
   13669 	if (sc->sc_type == WM_T_I211)
   13670 		return 0;
   13671 
   13672 #ifdef WM_DEBUG
   13673 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13674 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13675 		csum_wordaddr = NVM_OFF_COMPAT;
   13676 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13677 	} else {
   13678 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13679 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13680 	}
   13681 
   13682 	/* Dump EEPROM image for debug */
   13683 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13684 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13685 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13686 		/* XXX PCH_SPT? */
   13687 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13688 		if ((eeprom_data & valid_checksum) == 0)
   13689 			DPRINTF(WM_DEBUG_NVM,
   13690 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13691 				device_xname(sc->sc_dev), eeprom_data,
   13692 				    valid_checksum));
   13693 	}
   13694 
   13695 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13696 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13697 		for (i = 0; i < NVM_SIZE; i++) {
   13698 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13699 				printf("XXXX ");
   13700 			else
   13701 				printf("%04hx ", eeprom_data);
   13702 			if (i % 8 == 7)
   13703 				printf("\n");
   13704 		}
   13705 	}
   13706 
   13707 #endif /* WM_DEBUG */
   13708 
   13709 	for (i = 0; i < NVM_SIZE; i++) {
   13710 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13711 			return 1;
   13712 		checksum += eeprom_data;
   13713 	}
   13714 
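	/* Note that a mismatch is only reported, never treated as fatal. */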
   13715 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13716 #ifdef WM_DEBUG
   13717 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13718 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13719 #endif
   13720 	}
   13721 
   13722 	return 0;
   13723 }
   13724 
   13725 static void
   13726 wm_nvm_version_invm(struct wm_softc *sc)
   13727 {
   13728 	uint32_t dword;
   13729 
   13730 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes.  Perhaps it's not perfect, though...
   13734 	 *
   13735 	 * Example:
   13736 	 *
   13737 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13738 	 */
   13739 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13740 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13741 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13742 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13743 }
   13744 
   13745 static void
   13746 wm_nvm_version(struct wm_softc *sc)
   13747 {
   13748 	uint16_t major, minor, build, patch;
   13749 	uint16_t uid0, uid1;
   13750 	uint16_t nvm_data;
   13751 	uint16_t off;
   13752 	bool check_version = false;
   13753 	bool check_optionrom = false;
   13754 	bool have_build = false;
   13755 	bool have_uid = true;
   13756 
   13757 	/*
   13758 	 * Version format:
   13759 	 *
   13760 	 * XYYZ
   13761 	 * X0YZ
   13762 	 * X0YY
   13763 	 *
   13764 	 * Example:
   13765 	 *
   13766 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13767 	 *	82571	0x50a6	5.10.6?
   13768 	 *	82572	0x506a	5.6.10?
   13769 	 *	82572EI	0x5069	5.6.9?
   13770 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13771 	 *		0x2013	2.1.3?
   13772 	 *	82583	0x10a0	1.10.0? (document says it's default value)
   13773 	 * ICH8+82567	0x0040	0.4.0?
   13774 	 * ICH9+82566	0x1040	1.4.0?
   13775 	 *ICH10+82567	0x0043	0.4.3?
   13776 	 *  PCH+82577	0x00c1	0.12.1?
   13777 	 * PCH2+82579	0x00d3	0.13.3?
   13778 	 *		0x00d4	0.13.4?
   13779 	 *  LPT+I218	0x0023	0.2.3?
   13780 	 *  SPT+I219	0x0084	0.8.4?
   13781 	 *  CNP+I219	0x0054	0.5.4?
   13782 	 */
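
	/*
	 * A decode sketch for the XYYZ form above, e.g. 0x50a2:
	 * major = 0x5, minor = 0x0a read as decimal -> 10, build = 0x2,
	 * hence "5.10.2".
	 */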
   13783 
   13784 	/*
   13785 	 * XXX
   13786 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13788 	 */
   13789 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13790 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13791 		have_uid = false;
   13792 
   13793 	switch (sc->sc_type) {
   13794 	case WM_T_82571:
   13795 	case WM_T_82572:
   13796 	case WM_T_82574:
   13797 	case WM_T_82583:
   13798 		check_version = true;
   13799 		check_optionrom = true;
   13800 		have_build = true;
   13801 		break;
   13802 	case WM_T_ICH8:
   13803 	case WM_T_ICH9:
   13804 	case WM_T_ICH10:
   13805 	case WM_T_PCH:
   13806 	case WM_T_PCH2:
   13807 	case WM_T_PCH_LPT:
   13808 	case WM_T_PCH_SPT:
   13809 	case WM_T_PCH_CNP:
   13810 		check_version = true;
   13811 		have_build = true;
   13812 		have_uid = false;
   13813 		break;
   13814 	case WM_T_82575:
   13815 	case WM_T_82576:
   13816 	case WM_T_82580:
   13817 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13818 			check_version = true;
   13819 		break;
   13820 	case WM_T_I211:
   13821 		wm_nvm_version_invm(sc);
   13822 		have_uid = false;
   13823 		goto printver;
   13824 	case WM_T_I210:
   13825 		if (!wm_nvm_flash_presence_i210(sc)) {
   13826 			wm_nvm_version_invm(sc);
   13827 			have_uid = false;
   13828 			goto printver;
   13829 		}
   13830 		/* FALLTHROUGH */
   13831 	case WM_T_I350:
   13832 	case WM_T_I354:
   13833 		check_version = true;
   13834 		check_optionrom = true;
   13835 		break;
   13836 	default:
   13837 		return;
   13838 	}
   13839 	if (check_version
   13840 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13841 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13842 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13843 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13844 			build = nvm_data & NVM_BUILD_MASK;
   13845 			have_build = true;
   13846 		} else
   13847 			minor = nvm_data & 0x00ff;
   13848 
		/* Convert the hex digits to decimal (e.g. 0x0a -> 10) */
   13850 		minor = (minor / 16) * 10 + (minor % 16);
   13851 		sc->sc_nvm_ver_major = major;
   13852 		sc->sc_nvm_ver_minor = minor;
   13853 
   13854 printver:
   13855 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13856 		    sc->sc_nvm_ver_minor);
   13857 		if (have_build) {
   13858 			sc->sc_nvm_ver_build = build;
   13859 			aprint_verbose(".%d", build);
   13860 		}
   13861 	}
   13862 
	/* Assume the Option ROM area is above NVM_SIZE */
   13864 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13865 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13866 		/* Option ROM Version */
   13867 		if ((off != 0x0000) && (off != 0xffff)) {
   13868 			int rv;
   13869 
   13870 			off += NVM_COMBO_VER_OFF;
   13871 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13872 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13873 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13874 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13875 				/* 16bits */
   13876 				major = uid0 >> 8;
   13877 				build = (uid0 << 8) | (uid1 >> 8);
   13878 				patch = uid1 & 0x00ff;
   13879 				aprint_verbose(", option ROM Version %d.%d.%d",
   13880 				    major, build, patch);
   13881 			}
   13882 		}
   13883 	}
   13884 
   13885 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13886 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13887 }
   13888 
   13889 /*
   13890  * wm_nvm_read:
   13891  *
   13892  *	Read data from the serial EEPROM.
   13893  */
   13894 static int
   13895 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13896 {
   13897 	int rv;
   13898 
   13899 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13900 		device_xname(sc->sc_dev), __func__));
   13901 
   13902 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13903 		return -1;
   13904 
   13905 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13906 
   13907 	return rv;
   13908 }
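
/*
 * A usage sketch (illustration only): read the three words of the
 * permanent MAC address through whatever backend sc->nvm.read points
 * at on this chip.
 *
 *	uint16_t ea[3];
 *
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, ea) != 0)
 *		... treat the NVM content as invalid ...
 */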
   13909 
   13910 /*
   13911  * Hardware semaphores.
 * Very complex...
   13913  */
   13914 
   13915 static int
   13916 wm_get_null(struct wm_softc *sc)
   13917 {
   13918 
   13919 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13920 		device_xname(sc->sc_dev), __func__));
   13921 	return 0;
   13922 }
   13923 
   13924 static void
   13925 wm_put_null(struct wm_softc *sc)
   13926 {
   13927 
   13928 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13929 		device_xname(sc->sc_dev), __func__));
   13930 	return;
   13931 }
   13932 
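/*
 * wm_get_eecd:
 *
 *	Request direct EEPROM access through the EECD register and wait
 *	for the grant bit; wm_put_eecd() is the counterpart that drops
 *	the request.
 */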
   13933 static int
   13934 wm_get_eecd(struct wm_softc *sc)
   13935 {
   13936 	uint32_t reg;
   13937 	int x;
   13938 
   13939 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13940 		device_xname(sc->sc_dev), __func__));
   13941 
   13942 	reg = CSR_READ(sc, WMREG_EECD);
   13943 
   13944 	/* Request EEPROM access. */
   13945 	reg |= EECD_EE_REQ;
   13946 	CSR_WRITE(sc, WMREG_EECD, reg);
   13947 
	/* ... and wait for it to be granted. */
   13949 	for (x = 0; x < 1000; x++) {
   13950 		reg = CSR_READ(sc, WMREG_EECD);
   13951 		if (reg & EECD_EE_GNT)
   13952 			break;
   13953 		delay(5);
   13954 	}
   13955 	if ((reg & EECD_EE_GNT) == 0) {
   13956 		aprint_error_dev(sc->sc_dev,
   13957 		    "could not acquire EEPROM GNT\n");
   13958 		reg &= ~EECD_EE_REQ;
   13959 		CSR_WRITE(sc, WMREG_EECD, reg);
   13960 		return -1;
   13961 	}
   13962 
   13963 	return 0;
   13964 }
   13965 
   13966 static void
   13967 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13968 {
   13969 
   13970 	*eecd |= EECD_SK;
   13971 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13972 	CSR_WRITE_FLUSH(sc);
   13973 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13974 		delay(1);
   13975 	else
   13976 		delay(50);
   13977 }
   13978 
   13979 static void
   13980 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13981 {
   13982 
   13983 	*eecd &= ~EECD_SK;
   13984 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13985 	CSR_WRITE_FLUSH(sc);
   13986 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13987 		delay(1);
   13988 	else
   13989 		delay(50);
   13990 }
   13991 
   13992 static void
   13993 wm_put_eecd(struct wm_softc *sc)
   13994 {
   13995 	uint32_t reg;
   13996 
   13997 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13998 		device_xname(sc->sc_dev), __func__));
   13999 
   14000 	/* Stop nvm */
   14001 	reg = CSR_READ(sc, WMREG_EECD);
   14002 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   14003 		/* Pull CS high */
   14004 		reg |= EECD_CS;
   14005 		wm_nvm_eec_clock_lower(sc, &reg);
   14006 	} else {
   14007 		/* CS on Microwire is active-high */
   14008 		reg &= ~(EECD_CS | EECD_DI);
   14009 		CSR_WRITE(sc, WMREG_EECD, reg);
   14010 		wm_nvm_eec_clock_raise(sc, &reg);
   14011 		wm_nvm_eec_clock_lower(sc, &reg);
   14012 	}
   14013 
   14014 	reg = CSR_READ(sc, WMREG_EECD);
   14015 	reg &= ~EECD_EE_REQ;
   14016 	CSR_WRITE(sc, WMREG_EECD, reg);
   14017 
   14018 	return;
   14019 }
   14020 
   14021 /*
   14022  * Get hardware semaphore.
   14023  * Same as e1000_get_hw_semaphore_generic()
   14024  */
   14025 static int
   14026 wm_get_swsm_semaphore(struct wm_softc *sc)
   14027 {
   14028 	int32_t timeout;
   14029 	uint32_t swsm;
   14030 
   14031 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14032 		device_xname(sc->sc_dev), __func__));
   14033 	KASSERT(sc->sc_nvm_wordsize > 0);
   14034 
   14035 retry:
   14036 	/* Get the SW semaphore. */
   14037 	timeout = sc->sc_nvm_wordsize + 1;
   14038 	while (timeout) {
   14039 		swsm = CSR_READ(sc, WMREG_SWSM);
   14040 
   14041 		if ((swsm & SWSM_SMBI) == 0)
   14042 			break;
   14043 
   14044 		delay(50);
   14045 		timeout--;
   14046 	}
   14047 
   14048 	if (timeout == 0) {
   14049 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   14050 			/*
   14051 			 * In rare circumstances, the SW semaphore may already
   14052 			 * be held unintentionally. Clear the semaphore once
   14053 			 * before giving up.
   14054 			 */
   14055 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   14056 			wm_put_swsm_semaphore(sc);
   14057 			goto retry;
   14058 		}
   14059 		aprint_error_dev(sc->sc_dev,
   14060 		    "could not acquire SWSM SMBI\n");
   14061 		return 1;
   14062 	}
   14063 
   14064 	/* Get the FW semaphore. */
   14065 	timeout = sc->sc_nvm_wordsize + 1;
   14066 	while (timeout) {
   14067 		swsm = CSR_READ(sc, WMREG_SWSM);
   14068 		swsm |= SWSM_SWESMBI;
   14069 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   14070 		/* If we managed to set the bit we got the semaphore. */
   14071 		swsm = CSR_READ(sc, WMREG_SWSM);
   14072 		if (swsm & SWSM_SWESMBI)
   14073 			break;
   14074 
   14075 		delay(50);
   14076 		timeout--;
   14077 	}
   14078 
   14079 	if (timeout == 0) {
   14080 		aprint_error_dev(sc->sc_dev,
   14081 		    "could not acquire SWSM SWESMBI\n");
   14082 		/* Release semaphores */
   14083 		wm_put_swsm_semaphore(sc);
   14084 		return 1;
   14085 	}
   14086 	return 0;
   14087 }
   14088 
   14089 /*
   14090  * Put hardware semaphore.
   14091  * Same as e1000_put_hw_semaphore_generic()
   14092  */
   14093 static void
   14094 wm_put_swsm_semaphore(struct wm_softc *sc)
   14095 {
   14096 	uint32_t swsm;
   14097 
   14098 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14099 		device_xname(sc->sc_dev), __func__));
   14100 
   14101 	swsm = CSR_READ(sc, WMREG_SWSM);
   14102 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   14103 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   14104 }
   14105 
   14106 /*
   14107  * Get SW/FW semaphore.
   14108  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   14109  */
   14110 static int
   14111 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14112 {
   14113 	uint32_t swfw_sync;
   14114 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   14115 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   14116 	int timeout;
   14117 
   14118 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14119 		device_xname(sc->sc_dev), __func__));
   14120 
   14121 	if (sc->sc_type == WM_T_80003)
   14122 		timeout = 50;
   14123 	else
   14124 		timeout = 200;
   14125 
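	/*
	 * The resource is free only when both the SW and FW halves of
	 * the mask bit are clear; claim it by setting the SW half while
	 * holding the SWSM semaphore.
	 */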
   14126 	while (timeout) {
   14127 		if (wm_get_swsm_semaphore(sc)) {
   14128 			aprint_error_dev(sc->sc_dev,
   14129 			    "%s: failed to get semaphore\n",
   14130 			    __func__);
   14131 			return 1;
   14132 		}
   14133 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14134 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   14135 			swfw_sync |= swmask;
   14136 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14137 			wm_put_swsm_semaphore(sc);
   14138 			return 0;
   14139 		}
   14140 		wm_put_swsm_semaphore(sc);
   14141 		delay(5000);
   14142 		timeout--;
   14143 	}
   14144 	device_printf(sc->sc_dev,
   14145 	    "failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   14146 	    mask, swfw_sync);
   14147 	return 1;
   14148 }
   14149 
   14150 static void
   14151 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   14152 {
   14153 	uint32_t swfw_sync;
   14154 
   14155 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14156 		device_xname(sc->sc_dev), __func__));
   14157 
   14158 	while (wm_get_swsm_semaphore(sc) != 0)
   14159 		continue;
   14160 
   14161 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   14162 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   14163 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   14164 
   14165 	wm_put_swsm_semaphore(sc);
   14166 }
   14167 
   14168 static int
   14169 wm_get_nvm_80003(struct wm_softc *sc)
   14170 {
   14171 	int rv;
   14172 
   14173 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   14174 		device_xname(sc->sc_dev), __func__));
   14175 
   14176 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   14177 		aprint_error_dev(sc->sc_dev,
   14178 		    "%s: failed to get semaphore(SWFW)\n", __func__);
   14179 		return rv;
   14180 	}
   14181 
   14182 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14183 	    && (rv = wm_get_eecd(sc)) != 0) {
   14184 		aprint_error_dev(sc->sc_dev,
   14185 		    "%s: failed to get semaphore(EECD)\n", __func__);
   14186 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14187 		return rv;
   14188 	}
   14189 
   14190 	return 0;
   14191 }
   14192 
   14193 static void
   14194 wm_put_nvm_80003(struct wm_softc *sc)
   14195 {
   14196 
   14197 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14198 		device_xname(sc->sc_dev), __func__));
   14199 
   14200 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14201 		wm_put_eecd(sc);
   14202 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   14203 }
   14204 
   14205 static int
   14206 wm_get_nvm_82571(struct wm_softc *sc)
   14207 {
   14208 	int rv;
   14209 
   14210 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14211 		device_xname(sc->sc_dev), __func__));
   14212 
   14213 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   14214 		return rv;
   14215 
   14216 	switch (sc->sc_type) {
   14217 	case WM_T_82573:
   14218 		break;
   14219 	default:
   14220 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14221 			rv = wm_get_eecd(sc);
   14222 		break;
   14223 	}
   14224 
   14225 	if (rv != 0) {
   14226 		aprint_error_dev(sc->sc_dev,
   14227 		    "%s: failed to get semaphore\n",
   14228 		    __func__);
   14229 		wm_put_swsm_semaphore(sc);
   14230 	}
   14231 
   14232 	return rv;
   14233 }
   14234 
   14235 static void
   14236 wm_put_nvm_82571(struct wm_softc *sc)
   14237 {
   14238 
   14239 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14240 		device_xname(sc->sc_dev), __func__));
   14241 
   14242 	switch (sc->sc_type) {
   14243 	case WM_T_82573:
   14244 		break;
   14245 	default:
   14246 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   14247 			wm_put_eecd(sc);
   14248 		break;
   14249 	}
   14250 
   14251 	wm_put_swsm_semaphore(sc);
   14252 }
   14253 
   14254 static int
   14255 wm_get_phy_82575(struct wm_softc *sc)
   14256 {
   14257 
   14258 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14259 		device_xname(sc->sc_dev), __func__));
   14260 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14261 }
   14262 
   14263 static void
   14264 wm_put_phy_82575(struct wm_softc *sc)
   14265 {
   14266 
   14267 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14268 		device_xname(sc->sc_dev), __func__));
   14269 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   14270 }
   14271 
   14272 static int
   14273 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   14274 {
   14275 	uint32_t ext_ctrl;
	int timeout;
   14277 
   14278 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14279 		device_xname(sc->sc_dev), __func__));
   14280 
   14281 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14282 	for (timeout = 0; timeout < 200; timeout++) {
   14283 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14284 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14285 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14286 
   14287 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14288 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14289 			return 0;
   14290 		delay(5000);
   14291 	}
   14292 	device_printf(sc->sc_dev,
   14293 	    "failed to get swfwhw semaphore ext_ctrl 0x%x\n", ext_ctrl);
   14294 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14295 	return 1;
   14296 }
   14297 
   14298 static void
   14299 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   14300 {
   14301 	uint32_t ext_ctrl;
   14302 
   14303 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14304 		device_xname(sc->sc_dev), __func__));
   14305 
   14306 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14307 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14308 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14309 
   14310 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   14311 }
   14312 
   14313 static int
   14314 wm_get_swflag_ich8lan(struct wm_softc *sc)
   14315 {
   14316 	uint32_t ext_ctrl;
   14317 	int timeout;
   14318 
   14319 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14320 		device_xname(sc->sc_dev), __func__));
   14321 	mutex_enter(sc->sc_ich_phymtx);
   14322 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   14323 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14324 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   14325 			break;
   14326 		delay(1000);
   14327 	}
   14328 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   14329 		device_printf(sc->sc_dev,
   14330 		    "SW has already locked the resource\n");
   14331 		goto out;
   14332 	}
   14333 
   14334 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14335 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14336 	for (timeout = 0; timeout < 1000; timeout++) {
   14337 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14338 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   14339 			break;
   14340 		delay(1000);
   14341 	}
   14342 	if (timeout >= 1000) {
   14343 		device_printf(sc->sc_dev, "failed to acquire semaphore\n");
   14344 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14345 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14346 		goto out;
   14347 	}
   14348 	return 0;
   14349 
   14350 out:
   14351 	mutex_exit(sc->sc_ich_phymtx);
   14352 	return 1;
   14353 }
   14354 
   14355 static void
   14356 wm_put_swflag_ich8lan(struct wm_softc *sc)
   14357 {
   14358 	uint32_t ext_ctrl;
   14359 
   14360 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14361 		device_xname(sc->sc_dev), __func__));
   14362 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   14363 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   14364 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14365 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   14366 	} else {
   14367 		device_printf(sc->sc_dev, "Semaphore unexpectedly released\n");
   14368 	}
   14369 
   14370 	mutex_exit(sc->sc_ich_phymtx);
   14371 }
   14372 
   14373 static int
   14374 wm_get_nvm_ich8lan(struct wm_softc *sc)
   14375 {
   14376 
   14377 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14378 		device_xname(sc->sc_dev), __func__));
   14379 	mutex_enter(sc->sc_ich_nvmmtx);
   14380 
   14381 	return 0;
   14382 }
   14383 
   14384 static void
   14385 wm_put_nvm_ich8lan(struct wm_softc *sc)
   14386 {
   14387 
   14388 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14389 		device_xname(sc->sc_dev), __func__));
   14390 	mutex_exit(sc->sc_ich_nvmmtx);
   14391 }
   14392 
   14393 static int
   14394 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   14395 {
   14396 	int i = 0;
   14397 	uint32_t reg;
   14398 
   14399 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14400 		device_xname(sc->sc_dev), __func__));
   14401 
   14402 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14403 	do {
   14404 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   14405 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14406 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14407 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   14408 			break;
   14409 		delay(2*1000);
   14410 		i++;
   14411 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   14412 
   14413 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   14414 		wm_put_hw_semaphore_82573(sc);
   14415 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   14416 		    device_xname(sc->sc_dev));
   14417 		return -1;
   14418 	}
   14419 
   14420 	return 0;
   14421 }
   14422 
   14423 static void
   14424 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   14425 {
   14426 	uint32_t reg;
   14427 
   14428 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14429 		device_xname(sc->sc_dev), __func__));
   14430 
   14431 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14432 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   14433 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14434 }
   14435 
   14436 /*
   14437  * Management mode and power management related subroutines.
   14438  * BMC, AMT, suspend/resume and EEE.
   14439  */
   14440 
   14441 #ifdef WM_WOL
   14442 static int
   14443 wm_check_mng_mode(struct wm_softc *sc)
   14444 {
   14445 	int rv;
   14446 
   14447 	switch (sc->sc_type) {
   14448 	case WM_T_ICH8:
   14449 	case WM_T_ICH9:
   14450 	case WM_T_ICH10:
   14451 	case WM_T_PCH:
   14452 	case WM_T_PCH2:
   14453 	case WM_T_PCH_LPT:
   14454 	case WM_T_PCH_SPT:
   14455 	case WM_T_PCH_CNP:
   14456 		rv = wm_check_mng_mode_ich8lan(sc);
   14457 		break;
   14458 	case WM_T_82574:
   14459 	case WM_T_82583:
   14460 		rv = wm_check_mng_mode_82574(sc);
   14461 		break;
   14462 	case WM_T_82571:
   14463 	case WM_T_82572:
   14464 	case WM_T_82573:
   14465 	case WM_T_80003:
   14466 		rv = wm_check_mng_mode_generic(sc);
   14467 		break;
   14468 	default:
		/* Nothing to do */
   14470 		rv = 0;
   14471 		break;
   14472 	}
   14473 
   14474 	return rv;
   14475 }
   14476 
   14477 static int
   14478 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14479 {
   14480 	uint32_t fwsm;
   14481 
   14482 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14483 
   14484 	if (((fwsm & FWSM_FW_VALID) != 0)
   14485 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14486 		return 1;
   14487 
   14488 	return 0;
   14489 }
   14490 
   14491 static int
   14492 wm_check_mng_mode_82574(struct wm_softc *sc)
   14493 {
   14494 	uint16_t data;
   14495 
   14496 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14497 
   14498 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14499 		return 1;
   14500 
   14501 	return 0;
   14502 }
   14503 
   14504 static int
   14505 wm_check_mng_mode_generic(struct wm_softc *sc)
   14506 {
   14507 	uint32_t fwsm;
   14508 
   14509 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14510 
   14511 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14512 		return 1;
   14513 
   14514 	return 0;
   14515 }
   14516 #endif /* WM_WOL */
   14517 
   14518 static int
   14519 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14520 {
   14521 	uint32_t manc, fwsm, factps;
   14522 
   14523 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14524 		return 0;
   14525 
   14526 	manc = CSR_READ(sc, WMREG_MANC);
   14527 
   14528 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14529 		device_xname(sc->sc_dev), manc));
   14530 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14531 		return 0;
   14532 
   14533 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14534 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14535 		factps = CSR_READ(sc, WMREG_FACTPS);
   14536 		if (((factps & FACTPS_MNGCG) == 0)
   14537 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14538 			return 1;
   14539 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   14540 		uint16_t data;
   14541 
   14542 		factps = CSR_READ(sc, WMREG_FACTPS);
   14543 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14544 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14545 			device_xname(sc->sc_dev), factps, data));
   14546 		if (((factps & FACTPS_MNGCG) == 0)
   14547 		    && ((data & NVM_CFG2_MNGM_MASK)
   14548 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14549 			return 1;
   14550 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14551 	    && ((manc & MANC_ASF_EN) == 0))
   14552 		return 1;
   14553 
   14554 	return 0;
   14555 }
   14556 
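/*
 * Check whether a PHY reset is currently blocked by the management
 * firmware (FWSM_RSPCIPHY on ICH/PCH, MANC_BLK_PHY_RST_ON_IDE on the
 * 8257x parts).  On ICH/PCH, poll for up to about 300ms (30 x 10ms)
 * for the firmware to release the PHY.
 */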
   14557 static bool
   14558 wm_phy_resetisblocked(struct wm_softc *sc)
   14559 {
   14560 	bool blocked = false;
   14561 	uint32_t reg;
   14562 	int i = 0;
   14563 
   14564 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14565 		device_xname(sc->sc_dev), __func__));
   14566 
   14567 	switch (sc->sc_type) {
   14568 	case WM_T_ICH8:
   14569 	case WM_T_ICH9:
   14570 	case WM_T_ICH10:
   14571 	case WM_T_PCH:
   14572 	case WM_T_PCH2:
   14573 	case WM_T_PCH_LPT:
   14574 	case WM_T_PCH_SPT:
   14575 	case WM_T_PCH_CNP:
   14576 		do {
   14577 			reg = CSR_READ(sc, WMREG_FWSM);
   14578 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14579 				blocked = true;
   14580 				delay(10*1000);
   14581 				continue;
   14582 			}
   14583 			blocked = false;
   14584 		} while (blocked && (i++ < 30));
   14585 		return blocked;
   14587 	case WM_T_82571:
   14588 	case WM_T_82572:
   14589 	case WM_T_82573:
   14590 	case WM_T_82574:
   14591 	case WM_T_82583:
   14592 	case WM_T_80003:
   14593 		reg = CSR_READ(sc, WMREG_MANC);
   14594 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14595 			return true;
   14596 		else
   14597 			return false;
   14599 	default:
   14600 		/* No problem */
   14601 		break;
   14602 	}
   14603 
   14604 	return false;
   14605 }
   14606 
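/*
 * Tell the firmware that the driver has taken over: SWSM_DRV_LOAD on
 * the 82573, CTRL_EXT_DRV_LOAD on the other 82571-and-later parts.
 * While this bit is set, the BMC/ME firmware leaves the shared MAC/PHY
 * resources to the driver; wm_release_hw_control() hands them back.
 */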
   14607 static void
   14608 wm_get_hw_control(struct wm_softc *sc)
   14609 {
   14610 	uint32_t reg;
   14611 
   14612 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14613 		device_xname(sc->sc_dev), __func__));
   14614 
   14615 	if (sc->sc_type == WM_T_82573) {
   14616 		reg = CSR_READ(sc, WMREG_SWSM);
   14617 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14618 	} else if (sc->sc_type >= WM_T_82571) {
   14619 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14620 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14621 	}
   14622 }
   14623 
   14624 static void
   14625 wm_release_hw_control(struct wm_softc *sc)
   14626 {
   14627 	uint32_t reg;
   14628 
   14629 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14630 		device_xname(sc->sc_dev), __func__));
   14631 
   14632 	if (sc->sc_type == WM_T_82573) {
   14633 		reg = CSR_READ(sc, WMREG_SWSM);
   14634 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14635 	} else if (sc->sc_type >= WM_T_82571) {
   14636 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14637 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14638 	}
   14639 }
   14640 
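/*
 * Gate or ungate automatic PHY configuration by hardware (PCH2/82579
 * and newer).  While gated, the hardware does not start its own PHY
 * configuration cycle, so the driver can reset and program the PHY
 * without interference.
 */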
   14641 static void
   14642 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14643 {
   14644 	uint32_t reg;
   14645 
   14646 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14647 		device_xname(sc->sc_dev), __func__));
   14648 
   14649 	if (sc->sc_type < WM_T_PCH2)
   14650 		return;
   14651 
   14652 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14653 
   14654 	if (gate)
   14655 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14656 	else
   14657 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14658 
   14659 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14660 }
   14661 
   14662 static int
   14663 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14664 {
   14665 	uint32_t fwsm, reg;
   14666 	int rv = 0;
   14667 
   14668 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14669 		device_xname(sc->sc_dev), __func__));
   14670 
   14671 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14672 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14673 
   14674 	/* Disable ULP */
   14675 	wm_ulp_disable(sc);
   14676 
   14677 	/* Acquire PHY semaphore */
   14678 	rv = sc->phy.acquire(sc);
   14679 	if (rv != 0) {
   14680 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
			device_xname(sc->sc_dev), __func__));
   14682 		return -1;
   14683 	}
   14684 
   14685 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14686 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14687 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14688 	 */
   14689 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14690 	switch (sc->sc_type) {
   14691 	case WM_T_PCH_LPT:
   14692 	case WM_T_PCH_SPT:
   14693 	case WM_T_PCH_CNP:
   14694 		if (wm_phy_is_accessible_pchlan(sc))
   14695 			break;
   14696 
   14697 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14698 		 * forcing MAC to SMBus mode first.
   14699 		 */
   14700 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14701 		reg |= CTRL_EXT_FORCE_SMBUS;
   14702 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14703 #if 0
   14704 		/* XXX Isn't this required??? */
   14705 		CSR_WRITE_FLUSH(sc);
   14706 #endif
   14707 		/* Wait 50 milliseconds for MAC to finish any retries
   14708 		 * that it might be trying to perform from previous
   14709 		 * attempts to acknowledge any phy read requests.
   14710 		 */
   14711 		delay(50 * 1000);
   14712 		/* FALLTHROUGH */
   14713 	case WM_T_PCH2:
   14714 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14715 			break;
   14716 		/* FALLTHROUGH */
   14717 	case WM_T_PCH:
   14718 		if (sc->sc_type == WM_T_PCH)
   14719 			if ((fwsm & FWSM_FW_VALID) != 0)
   14720 				break;
   14721 
   14722 		if (wm_phy_resetisblocked(sc) == true) {
   14723 			device_printf(sc->sc_dev, "XXX reset is blocked(3)\n");
   14724 			break;
   14725 		}
   14726 
   14727 		/* Toggle LANPHYPC Value bit */
   14728 		wm_toggle_lanphypc_pch_lpt(sc);
   14729 
   14730 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14731 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14732 				break;
   14733 
   14734 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14735 			 * so ensure that the MAC is also out of SMBus mode
   14736 			 */
   14737 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14738 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14739 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14740 
   14741 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14742 				break;
   14743 			rv = -1;
   14744 		}
   14745 		break;
   14746 	default:
   14747 		break;
   14748 	}
   14749 
   14750 	/* Release semaphore */
   14751 	sc->phy.release(sc);
   14752 
   14753 	if (rv == 0) {
   14754 		/* Check to see if able to reset PHY.  Print error if not */
   14755 		if (wm_phy_resetisblocked(sc)) {
   14756 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14757 			goto out;
   14758 		}
   14759 
   14760 		/* Reset the PHY before any access to it.  Doing so, ensures
   14761 		 * that the PHY is in a known good state before we read/write
   14762 		 * PHY registers.  The generic reset is sufficient here,
   14763 		 * because we haven't determined the PHY type yet.
   14764 		 */
		if ((rv = wm_reset_phy(sc)) != 0)
   14766 			goto out;
   14767 
   14768 		/* On a successful reset, possibly need to wait for the PHY
   14769 		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, it
		 * is still in the blocked-reset state; print a diagnostic
		 * (Linux returns E1000E_BLK_PHY_RESET for this condition).
   14773 		 */
   14774 		if (wm_phy_resetisblocked(sc))
   14775 			device_printf(sc->sc_dev, "XXX reset is blocked(4)\n");
   14776 	}
   14777 
   14778 out:
   14779 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14780 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14781 		delay(10*1000);
   14782 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14783 	}
   14784 
	return rv;
   14786 }
   14787 
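/*
 * Set up MANC/MANC2H for a managed adapter: let the host (not the
 * firmware) handle ARP, and forward management packets on ports
 * 623/624 to the host interface.
 */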
   14788 static void
   14789 wm_init_manageability(struct wm_softc *sc)
   14790 {
   14791 
   14792 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14793 		device_xname(sc->sc_dev), __func__));
   14794 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14795 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14796 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14797 
   14798 		/* Disable hardware interception of ARP */
   14799 		manc &= ~MANC_ARP_EN;
   14800 
   14801 		/* Enable receiving management packets to the host */
   14802 		if (sc->sc_type >= WM_T_82571) {
   14803 			manc |= MANC_EN_MNG2HOST;
   14804 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14805 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14806 		}
   14807 
   14808 		CSR_WRITE(sc, WMREG_MANC, manc);
   14809 	}
   14810 }
   14811 
   14812 static void
   14813 wm_release_manageability(struct wm_softc *sc)
   14814 {
   14815 
   14816 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14817 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14818 
   14819 		manc |= MANC_ARP_EN;
   14820 		if (sc->sc_type >= WM_T_82571)
   14821 			manc &= ~MANC_EN_MNG2HOST;
   14822 
   14823 		CSR_WRITE(sc, WMREG_MANC, manc);
   14824 	}
   14825 }
   14826 
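/*
 * Probe which manageability/wakeup features this chip has and record
 * them in sc_flags (WM_F_HAS_AMT, WM_F_ARC_SUBSYS_VALID,
 * WM_F_ASF_FIRMWARE_PRES and WM_F_HAS_MANAGE).
 */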
   14827 static void
   14828 wm_get_wakeup(struct wm_softc *sc)
   14829 {
   14830 
   14831 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14832 	switch (sc->sc_type) {
   14833 	case WM_T_82573:
   14834 	case WM_T_82583:
   14835 		sc->sc_flags |= WM_F_HAS_AMT;
   14836 		/* FALLTHROUGH */
   14837 	case WM_T_80003:
   14838 	case WM_T_82575:
   14839 	case WM_T_82576:
   14840 	case WM_T_82580:
   14841 	case WM_T_I350:
   14842 	case WM_T_I354:
   14843 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14844 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14845 		/* FALLTHROUGH */
   14846 	case WM_T_82541:
   14847 	case WM_T_82541_2:
   14848 	case WM_T_82547:
   14849 	case WM_T_82547_2:
   14850 	case WM_T_82571:
   14851 	case WM_T_82572:
   14852 	case WM_T_82574:
   14853 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14854 		break;
   14855 	case WM_T_ICH8:
   14856 	case WM_T_ICH9:
   14857 	case WM_T_ICH10:
   14858 	case WM_T_PCH:
   14859 	case WM_T_PCH2:
   14860 	case WM_T_PCH_LPT:
   14861 	case WM_T_PCH_SPT:
   14862 	case WM_T_PCH_CNP:
   14863 		sc->sc_flags |= WM_F_HAS_AMT;
   14864 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14865 		break;
   14866 	default:
   14867 		break;
   14868 	}
   14869 
   14870 	/* 1: HAS_MANAGE */
   14871 	if (wm_enable_mng_pass_thru(sc) != 0)
   14872 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14873 
   14874 	/*
	 * Note that the WOL flag is set after the EEPROM-related reset
	 * code has run.
   14877 	 */
   14878 }
   14879 
   14880 /*
 * Unconfigure Ultra Low Power (ULP) mode.
 * Only for PCH_LPT and newer; some I217/I218 variants that do not
 * support ULP are excluded below.
   14883  */
   14884 static int
   14885 wm_ulp_disable(struct wm_softc *sc)
   14886 {
   14887 	uint32_t reg;
   14888 	uint16_t phyreg;
   14889 	int i = 0, rv = 0;
   14890 
   14891 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14892 		device_xname(sc->sc_dev), __func__));
	/* Exclude old devices and I217/I218 variants without ULP support */
   14894 	if ((sc->sc_type < WM_T_PCH_LPT)
   14895 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14896 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14897 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14898 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14899 		return 0;
   14900 
   14901 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14902 		/* Request ME un-configure ULP mode in the PHY */
   14903 		reg = CSR_READ(sc, WMREG_H2ME);
   14904 		reg &= ~H2ME_ULP;
   14905 		reg |= H2ME_ENFORCE_SETTINGS;
   14906 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14907 
   14908 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14909 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14910 			if (i++ == 30) {
   14911 				device_printf(sc->sc_dev, "%s timed out\n",
   14912 				    __func__);
   14913 				return -1;
   14914 			}
   14915 			delay(10 * 1000);
   14916 		}
   14917 		reg = CSR_READ(sc, WMREG_H2ME);
   14918 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14919 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14920 
   14921 		return 0;
   14922 	}
   14923 
   14924 	/* Acquire semaphore */
   14925 	rv = sc->phy.acquire(sc);
   14926 	if (rv != 0) {
   14927 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
			device_xname(sc->sc_dev), __func__));
   14929 		return -1;
   14930 	}
   14931 
   14932 	/* Toggle LANPHYPC */
   14933 	wm_toggle_lanphypc_pch_lpt(sc);
   14934 
   14935 	/* Unforce SMBus mode in PHY */
   14936 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14937 	if (rv != 0) {
   14938 		uint32_t reg2;
   14939 
   14940 		aprint_debug_dev(sc->sc_dev, "%s: Force SMBus first.\n",
   14941 			__func__);
   14942 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14943 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14944 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14945 		delay(50 * 1000);
   14946 
   14947 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14948 		    &phyreg);
   14949 		if (rv != 0)
   14950 			goto release;
   14951 	}
   14952 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14953 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14954 
   14955 	/* Unforce SMBus mode in MAC */
   14956 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14957 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14958 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14959 
   14960 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14961 	if (rv != 0)
   14962 		goto release;
   14963 	phyreg |= HV_PM_CTRL_K1_ENA;
   14964 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14965 
   14966 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14967 		&phyreg);
   14968 	if (rv != 0)
   14969 		goto release;
   14970 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14971 	    | I218_ULP_CONFIG1_STICKY_ULP
   14972 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14973 	    | I218_ULP_CONFIG1_WOL_HOST
   14974 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14975 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14976 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14977 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14978 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14979 	phyreg |= I218_ULP_CONFIG1_START;
   14980 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14981 
   14982 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14983 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14984 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14985 
   14986 release:
   14987 	/* Release semaphore */
   14988 	sc->phy.release(sc);
   14989 	wm_gmii_reset(sc);
   14990 	delay(50 * 1000);
   14991 
   14992 	return rv;
   14993 }
   14994 
   14995 /* WOL in the newer chipset interfaces (pchlan) */
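/*
 * On PCH and newer, the wakeup logic lives in the PHY, so the MAC's
 * receive address registers, multicast table and Rx control settings
 * have to be mirrored into the PHY's BM wakeup register page, and
 * wakeup is then armed through BM_WUC/BM_WUFC in addition to the MAC's
 * WUC/WUFC.
 */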
   14996 static int
   14997 wm_enable_phy_wakeup(struct wm_softc *sc)
   14998 {
   14999 	device_t dev = sc->sc_dev;
   15000 	uint32_t mreg, moff;
   15001 	uint16_t wuce, wuc, wufc, preg;
   15002 	int i, rv;
   15003 
   15004 	KASSERT(sc->sc_type >= WM_T_PCH);
   15005 
   15006 	/* Copy MAC RARs to PHY RARs */
   15007 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   15008 
   15009 	/* Activate PHY wakeup */
   15010 	rv = sc->phy.acquire(sc);
   15011 	if (rv != 0) {
   15012 		device_printf(dev, "%s: failed to acquire semaphore\n",
   15013 		    __func__);
   15014 		return rv;
   15015 	}
   15016 
   15017 	/*
   15018 	 * Enable access to PHY wakeup registers.
   15019 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   15020 	 */
   15021 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   15022 	if (rv != 0) {
   15023 		device_printf(dev,
   15024 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   15025 		goto release;
   15026 	}
   15027 
   15028 	/* Copy MAC MTA to PHY MTA */
   15029 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   15030 		uint16_t lo, hi;
   15031 
   15032 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   15033 		lo = (uint16_t)(mreg & 0xffff);
   15034 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   15035 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   15036 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   15037 	}
   15038 
   15039 	/* Configure PHY Rx Control register */
   15040 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   15041 	mreg = CSR_READ(sc, WMREG_RCTL);
   15042 	if (mreg & RCTL_UPE)
   15043 		preg |= BM_RCTL_UPE;
   15044 	if (mreg & RCTL_MPE)
   15045 		preg |= BM_RCTL_MPE;
   15046 	preg &= ~(BM_RCTL_MO_MASK);
   15047 	moff = __SHIFTOUT(mreg, RCTL_MO);
   15048 	if (moff != 0)
   15049 		preg |= moff << BM_RCTL_MO_SHIFT;
   15050 	if (mreg & RCTL_BAM)
   15051 		preg |= BM_RCTL_BAM;
   15052 	if (mreg & RCTL_PMCF)
   15053 		preg |= BM_RCTL_PMCF;
   15054 	mreg = CSR_READ(sc, WMREG_CTRL);
   15055 	if (mreg & CTRL_RFCE)
   15056 		preg |= BM_RCTL_RFCE;
   15057 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   15058 
   15059 	wuc = WUC_APME | WUC_PME_EN;
   15060 	wufc = WUFC_MAG;
   15061 	/* Enable PHY wakeup in MAC register */
   15062 	CSR_WRITE(sc, WMREG_WUC,
   15063 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   15064 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   15065 
   15066 	/* Configure and enable PHY wakeup in PHY registers */
   15067 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   15068 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   15069 
   15070 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   15071 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15072 
   15073 release:
   15074 	sc->phy.release(sc);
   15075 
	return rv;
   15077 }
   15078 
   15079 /* Power down workaround on D3 */
   15080 static void
   15081 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   15082 {
   15083 	uint32_t reg;
   15084 	uint16_t phyreg;
   15085 	int i;
   15086 
   15087 	for (i = 0; i < 2; i++) {
   15088 		/* Disable link */
   15089 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15090 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15091 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15092 
   15093 		/*
   15094 		 * Call gig speed drop workaround on Gig disable before
   15095 		 * accessing any PHY registers
   15096 		 */
   15097 		if (sc->sc_type == WM_T_ICH8)
   15098 			wm_gig_downshift_workaround_ich8lan(sc);
   15099 
   15100 		/* Write VR power-down enable */
   15101 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15102 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15103 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   15104 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   15105 
   15106 		/* Read it back and test */
   15107 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   15108 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   15109 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   15110 			break;
   15111 
   15112 		/* Issue PHY reset and repeat at most one more time */
   15113 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   15114 	}
   15115 }
   15116 
   15117 /*
   15118  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   15119  *  @sc: pointer to the HW structure
   15120  *
   15121  *  During S0 to Sx transition, it is possible the link remains at gig
   15122  *  instead of negotiating to a lower speed.  Before going to Sx, set
   15123  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   15124  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   15125  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   15126  *  needs to be written.
 *  Parts that support (and are linked to a partner which supports) EEE in
   15128  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   15129  *  than 10Mbps w/o EEE.
   15130  */
   15131 static void
   15132 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   15133 {
   15134 	device_t dev = sc->sc_dev;
   15135 	struct ethercom *ec = &sc->sc_ethercom;
   15136 	uint32_t phy_ctrl;
   15137 	int rv;
   15138 
   15139 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   15140 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   15141 
   15142 	KASSERT((sc->sc_type >= WM_T_ICH8) && (sc->sc_type <= WM_T_PCH_CNP));
   15143 
   15144 	if (sc->sc_phytype == WMPHY_I217) {
   15145 		uint16_t devid = sc->sc_pcidevid;
   15146 
   15147 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   15148 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   15149 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   15150 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   15151 		    (sc->sc_type >= WM_T_PCH_SPT))
   15152 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   15153 			    CSR_READ(sc, WMREG_FEXTNVM6)
   15154 			    & ~FEXTNVM6_REQ_PLL_CLK);
   15155 
   15156 		if (sc->phy.acquire(sc) != 0)
   15157 			goto out;
   15158 
   15159 		if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15160 			uint16_t eee_advert;
   15161 
   15162 			rv = wm_read_emi_reg_locked(dev,
   15163 			    I217_EEE_ADVERTISEMENT, &eee_advert);
   15164 			if (rv)
   15165 				goto release;
   15166 
   15167 			/*
   15168 			 * Disable LPLU if both link partners support 100BaseT
   15169 			 * EEE and 100Full is advertised on both ends of the
   15170 			 * link, and enable Auto Enable LPI since there will
   15171 			 * be no driver to enable LPI while in Sx.
   15172 			 */
   15173 			if ((eee_advert & AN_EEEADVERT_100_TX) &&
   15174 			    (sc->eee_lp_ability & AN_EEEADVERT_100_TX)) {
   15175 				uint16_t anar, phy_reg;
   15176 
   15177 				sc->phy.readreg_locked(dev, 2, MII_ANAR,
   15178 				    &anar);
   15179 				if (anar & ANAR_TX_FD) {
   15180 					phy_ctrl &= ~(PHY_CTRL_D0A_LPLU |
   15181 					    PHY_CTRL_NOND0A_LPLU);
   15182 
   15183 					/* Set Auto Enable LPI after link up */
   15184 					sc->phy.readreg_locked(dev, 2,
   15185 					    I217_LPI_GPIO_CTRL, &phy_reg);
   15186 					phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15187 					sc->phy.writereg_locked(dev, 2,
   15188 					    I217_LPI_GPIO_CTRL, phy_reg);
   15189 				}
   15190 			}
   15191 		}
   15192 
   15193 		/*
   15194 		 * For i217 Intel Rapid Start Technology support,
   15195 		 * when the system is going into Sx and no manageability engine
   15196 		 * is present, the driver must configure proxy to reset only on
   15197 		 * power good.	LPI (Low Power Idle) state must also reset only
   15198 		 * on power good, as well as the MTA (Multicast table array).
   15199 		 * The SMBus release must also be disabled on LCD reset.
   15200 		 */
   15201 
   15202 		/*
   15203 		 * Enable MTA to reset for Intel Rapid Start Technology
   15204 		 * Support
   15205 		 */
   15206 
   15207 release:
   15208 		sc->phy.release(sc);
   15209 	}
   15210 out:
   15211 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   15212 
   15213 	if (sc->sc_type == WM_T_ICH8)
   15214 		wm_gig_downshift_workaround_ich8lan(sc);
   15215 
   15216 	if (sc->sc_type >= WM_T_PCH) {
   15217 		wm_oem_bits_config_ich8lan(sc, false);
   15218 
   15219 		/* Reset PHY to activate OEM bits on 82577/8 */
   15220 		if (sc->sc_type == WM_T_PCH)
   15221 			wm_reset_phy(sc);
   15222 
   15223 		if (sc->phy.acquire(sc) != 0)
   15224 			return;
   15225 		wm_write_smbus_addr(sc);
   15226 		sc->phy.release(sc);
   15227 	}
   15228 }
   15229 
   15230 /*
   15231  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   15232  *  @sc: pointer to the HW structure
   15233  *
   15234  *  During Sx to S0 transitions on non-managed devices or managed devices
   15235  *  on which PHY resets are not blocked, if the PHY registers cannot be
   15236  *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
   15237  *  the PHY.
   15238  *  On i217, setup Intel Rapid Start Technology.
   15239  */
   15240 static int
   15241 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   15242 {
   15243 	device_t dev = sc->sc_dev;
   15244 	int rv;
   15245 
   15246 	if (sc->sc_type < WM_T_PCH2)
   15247 		return 0;
   15248 
   15249 	rv = wm_init_phy_workarounds_pchlan(sc);
   15250 	if (rv != 0)
   15251 		return -1;
   15252 
   15253 	/* For i217 Intel Rapid Start Technology support when the system
   15254 	 * is transitioning from Sx and no manageability engine is present
   15255 	 * configure SMBus to restore on reset, disable proxy, and enable
   15256 	 * the reset on MTA (Multicast table array).
   15257 	 */
   15258 	if (sc->sc_phytype == WMPHY_I217) {
   15259 		uint16_t phy_reg;
   15260 
   15261 		if (sc->phy.acquire(sc) != 0)
   15262 			return -1;
   15263 
   15264 		/* Clear Auto Enable LPI after link up */
   15265 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   15266 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   15267 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   15268 
   15269 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15270 			/* Restore clear on SMB if no manageability engine
   15271 			 * is present
   15272 			 */
   15273 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   15274 			    &phy_reg);
   15275 			if (rv != 0)
   15276 				goto release;
   15277 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   15278 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   15279 
   15280 			/* Disable Proxy */
   15281 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   15282 		}
   15283 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   15285 		if (rv != 0)
   15286 			goto release;
   15287 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   15288 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   15289 
   15290 release:
   15291 		sc->phy.release(sc);
   15292 		return rv;
   15293 	}
   15294 
   15295 	return 0;
   15296 }
   15297 
   15298 static void
   15299 wm_enable_wakeup(struct wm_softc *sc)
   15300 {
   15301 	uint32_t reg, pmreg;
   15302 	pcireg_t pmode;
   15303 	int rv = 0;
   15304 
   15305 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15306 		device_xname(sc->sc_dev), __func__));
   15307 
   15308 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15309 	    &pmreg, NULL) == 0)
   15310 		return;
   15311 
   15312 	if ((sc->sc_flags & WM_F_WOL) == 0)
   15313 		goto pme;
   15314 
   15315 	/* Advertise the wakeup capability */
   15316 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   15317 	    | CTRL_SWDPIN(3));
   15318 
   15319 	/* Keep the laser running on fiber adapters */
   15320 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   15321 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   15322 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15323 		reg |= CTRL_EXT_SWDPIN(3);
   15324 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15325 	}
   15326 
   15327 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   15328 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   15329 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   15330 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   15331 		wm_suspend_workarounds_ich8lan(sc);
   15332 
   15333 #if 0	/* For the multicast packet */
   15334 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   15335 	reg |= WUFC_MC;
   15336 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   15337 #endif
   15338 
   15339 	if (sc->sc_type >= WM_T_PCH) {
   15340 		rv = wm_enable_phy_wakeup(sc);
   15341 		if (rv != 0)
   15342 			goto pme;
   15343 	} else {
   15344 		/* Enable wakeup by the MAC */
   15345 		CSR_WRITE(sc, WMREG_WUC, WUC_APME | WUC_PME_EN);
   15346 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   15347 	}
   15348 
   15349 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   15350 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   15351 		|| (sc->sc_type == WM_T_PCH2))
   15352 	    && (sc->sc_phytype == WMPHY_IGP_3))
   15353 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   15354 
   15355 pme:
   15356 	/* Request PME */
   15357 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   15358 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   15359 		/* For WOL */
   15360 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   15361 	} else {
   15362 		/* Disable WOL */
   15363 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   15364 	}
   15365 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   15366 }
   15367 
   15368 /* Disable ASPM L0s and/or L1 for workaround */
   15369 static void
   15370 wm_disable_aspm(struct wm_softc *sc)
   15371 {
   15372 	pcireg_t reg, mask = 0;
	const char *str = "";
   15374 
   15375 	/*
	 * Only for PCIe devices which have the PCIe capability in their
	 * PCI config space.
   15378 	 */
   15379 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   15380 		return;
   15381 
   15382 	switch (sc->sc_type) {
   15383 	case WM_T_82571:
   15384 	case WM_T_82572:
   15385 		/*
   15386 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   15387 		 * State Power management L1 State (ASPM L1).
   15388 		 */
   15389 		mask = PCIE_LCSR_ASPM_L1;
   15390 		str = "L1 is";
   15391 		break;
   15392 	case WM_T_82573:
   15393 	case WM_T_82574:
   15394 	case WM_T_82583:
   15395 		/*
   15396 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   15397 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The 82574 and 82583 documents say that
		 * disabling L0s on those specific chipsets is sufficient,
		 * but we follow what the Intel em driver does.
   15402 		 *
   15403 		 * References:
   15404 		 * Errata 8 of the Specification Update of i82573.
   15405 		 * Errata 20 of the Specification Update of i82574.
   15406 		 * Errata 9 of the Specification Update of i82583.
   15407 		 */
   15408 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   15409 		str = "L0s and L1 are";
   15410 		break;
   15411 	default:
   15412 		return;
   15413 	}
   15414 
   15415 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15416 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   15417 	reg &= ~mask;
   15418 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15419 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   15420 
   15421 	/* Print only in wm_attach() */
   15422 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   15423 		aprint_verbose_dev(sc->sc_dev,
   15424 		    "ASPM %s disabled to workaround the errata.\n", str);
   15425 }
   15426 
   15427 /* LPLU */
   15428 
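/*
 * Disable D0 LPLU (Low Power Link Up).  With LPLU enabled, the PHY
 * negotiates the lowest common speed to save power; disabling it in
 * the D0 (fully powered) state lets the link come up at the highest
 * speed both partners support.
 */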
   15429 static void
   15430 wm_lplu_d0_disable(struct wm_softc *sc)
   15431 {
   15432 	struct mii_data *mii = &sc->sc_mii;
   15433 	uint32_t reg;
   15434 	uint16_t phyval;
   15435 
   15436 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15437 		device_xname(sc->sc_dev), __func__));
   15438 
   15439 	if (sc->sc_phytype == WMPHY_IFE)
   15440 		return;
   15441 
   15442 	switch (sc->sc_type) {
   15443 	case WM_T_82571:
   15444 	case WM_T_82572:
   15445 	case WM_T_82573:
   15446 	case WM_T_82575:
   15447 	case WM_T_82576:
   15448 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   15449 		phyval &= ~PMR_D0_LPLU;
   15450 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   15451 		break;
   15452 	case WM_T_82580:
   15453 	case WM_T_I350:
   15454 	case WM_T_I210:
   15455 	case WM_T_I211:
   15456 		reg = CSR_READ(sc, WMREG_PHPM);
   15457 		reg &= ~PHPM_D0A_LPLU;
   15458 		CSR_WRITE(sc, WMREG_PHPM, reg);
   15459 		break;
   15460 	case WM_T_82574:
   15461 	case WM_T_82583:
   15462 	case WM_T_ICH8:
   15463 	case WM_T_ICH9:
   15464 	case WM_T_ICH10:
   15465 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15466 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   15467 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15468 		CSR_WRITE_FLUSH(sc);
   15469 		break;
   15470 	case WM_T_PCH:
   15471 	case WM_T_PCH2:
   15472 	case WM_T_PCH_LPT:
   15473 	case WM_T_PCH_SPT:
   15474 	case WM_T_PCH_CNP:
   15475 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15476 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15477 		if (wm_phy_resetisblocked(sc) == false)
   15478 			phyval |= HV_OEM_BITS_ANEGNOW;
   15479 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15480 		break;
   15481 	default:
   15482 		break;
   15483 	}
   15484 }
   15485 
   15486 /* EEE */
   15487 
   15488 static int
   15489 wm_set_eee_i350(struct wm_softc *sc)
   15490 {
   15491 	struct ethercom *ec = &sc->sc_ethercom;
   15492 	uint32_t ipcnfg, eeer;
   15493 	uint32_t ipcnfg_mask
   15494 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15495 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15496 
   15497 	KASSERT(sc->sc_mediatype == WM_MEDIATYPE_COPPER);
   15498 
   15499 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15500 	eeer = CSR_READ(sc, WMREG_EEER);
   15501 
   15502 	/* Enable or disable per user setting */
   15503 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15504 		ipcnfg |= ipcnfg_mask;
   15505 		eeer |= eeer_mask;
   15506 	} else {
   15507 		ipcnfg &= ~ipcnfg_mask;
   15508 		eeer &= ~eeer_mask;
   15509 	}
   15510 
   15511 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15512 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15513 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15514 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15515 
   15516 	return 0;
   15517 }
   15518 
   15519 static int
   15520 wm_set_eee_pchlan(struct wm_softc *sc)
   15521 {
   15522 	device_t dev = sc->sc_dev;
   15523 	struct ethercom *ec = &sc->sc_ethercom;
   15524 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15525 	int rv = 0;
   15526 
   15527 	switch (sc->sc_phytype) {
   15528 	case WMPHY_82579:
   15529 		lpa = I82579_EEE_LP_ABILITY;
   15530 		pcs_status = I82579_EEE_PCS_STATUS;
   15531 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15532 		break;
   15533 	case WMPHY_I217:
   15534 		lpa = I217_EEE_LP_ABILITY;
   15535 		pcs_status = I217_EEE_PCS_STATUS;
   15536 		adv_addr = I217_EEE_ADVERTISEMENT;
   15537 		break;
   15538 	default:
   15539 		return 0;
   15540 	}
   15541 
   15542 	if (sc->phy.acquire(sc)) {
   15543 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15544 		return 0;
   15545 	}
   15546 
   15547 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15548 	if (rv != 0)
   15549 		goto release;
   15550 
   15551 	/* Clear bits that enable EEE in various speeds */
   15552 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15553 
   15554 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15555 		/* Save off link partner's EEE ability */
   15556 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15557 		if (rv != 0)
   15558 			goto release;
   15559 
   15560 		/* Read EEE advertisement */
   15561 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15562 			goto release;
   15563 
   15564 		/*
   15565 		 * Enable EEE only for speeds in which the link partner is
   15566 		 * EEE capable and for which we advertise EEE.
   15567 		 */
   15568 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15569 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15570 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15571 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15572 			if ((data & ANLPAR_TX_FD) != 0)
   15573 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15574 			else {
   15575 				/*
   15576 				 * EEE is not supported in 100Half, so ignore
   15577 				 * partner's EEE in 100 ability if full-duplex
   15578 				 * is not advertised.
   15579 				 */
   15580 				sc->eee_lp_ability
   15581 				    &= ~AN_EEEADVERT_100_TX;
   15582 			}
   15583 		}
   15584 	}
   15585 
   15586 	if (sc->sc_phytype == WMPHY_82579) {
   15587 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15588 		if (rv != 0)
   15589 			goto release;
   15590 
   15591 		data &= ~I82579_LPI_PLL_SHUT_100;
   15592 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15593 	}
   15594 
   15595 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15596 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15597 		goto release;
   15598 
   15599 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15600 release:
   15601 	sc->phy.release(sc);
   15602 
   15603 	return rv;
   15604 }
   15605 
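/*
 * Configure EEE (Energy Efficient Ethernet, IEEE 802.3az) according to
 * the ETHERCAP_EEE setting, dispatching to the chipset-specific
 * routine.
 */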
   15606 static int
   15607 wm_set_eee(struct wm_softc *sc)
   15608 {
   15609 	struct ethercom *ec = &sc->sc_ethercom;
   15610 
   15611 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15612 		return 0;
   15613 
   15614 	if (sc->sc_type == WM_T_I354) {
   15615 		/* I354 uses an external PHY */
   15616 		return 0; /* not yet */
   15617 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15618 		return wm_set_eee_i350(sc);
   15619 	else if (sc->sc_type >= WM_T_PCH2)
   15620 		return wm_set_eee_pchlan(sc);
   15621 
   15622 	return 0;
   15623 }
   15624 
   15625 /*
   15626  * Workarounds (mainly PHY related).
   15627  * Basically, PHY's workarounds are in the PHY drivers.
   15628  */
   15629 
   15630 /* Work-around for 82566 Kumeran PCS lock loss */
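/*
 * IGP3_KMRN_DIAG is read twice per iteration to get a stable value; if
 * the PCS lock-loss bit stays set after 10 PHY resets, fall back to
 * disabling gigabit negotiation entirely.
 */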
   15631 static int
   15632 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15633 {
   15634 	struct mii_data *mii = &sc->sc_mii;
   15635 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15636 	int i, reg, rv;
   15637 	uint16_t phyreg;
   15638 
   15639 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15640 		device_xname(sc->sc_dev), __func__));
   15641 
   15642 	/* If the link is not up, do nothing */
   15643 	if ((status & STATUS_LU) == 0)
   15644 		return 0;
   15645 
   15646 	/* Nothing to do if the link is other than 1Gbps */
   15647 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15648 		return 0;
   15649 
   15650 	for (i = 0; i < 10; i++) {
   15651 		/* read twice */
   15652 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15653 		if (rv != 0)
   15654 			return rv;
   15655 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15656 		if (rv != 0)
   15657 			return rv;
   15658 
   15659 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15660 			goto out;	/* GOOD! */
   15661 
   15662 		/* Reset the PHY */
   15663 		wm_reset_phy(sc);
   15664 		delay(5*1000);
   15665 	}
   15666 
   15667 	/* Disable GigE link negotiation */
   15668 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15669 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15670 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15671 
   15672 	/*
   15673 	 * Call gig speed drop workaround on Gig disable before accessing
   15674 	 * any PHY registers.
   15675 	 */
   15676 	wm_gig_downshift_workaround_ich8lan(sc);
   15677 
   15678 out:
   15679 	return 0;
   15680 }
   15681 
   15682 /*
   15683  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15684  *  @sc: pointer to the HW structure
   15685  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15687  *  LPLU, Gig disable, MDIC PHY reset):
   15688  *    1) Set Kumeran Near-end loopback
   15689  *    2) Clear Kumeran Near-end loopback
   15690  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15691  */
   15692 static void
   15693 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15694 {
   15695 	uint16_t kmreg;
   15696 
   15697 	/* Only for igp3 */
   15698 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15699 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15700 			return;
   15701 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15702 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15703 			return;
   15704 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15705 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15706 	}
   15707 }
   15708 
   15709 /*
 * Workarounds for the PCH PHYs
 * XXX should this be moved to a new PHY driver?
   15712  */
   15713 static int
   15714 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15715 {
   15716 	device_t dev = sc->sc_dev;
   15717 	struct mii_data *mii = &sc->sc_mii;
   15718 	struct mii_softc *child;
   15719 	uint16_t phy_data, phyrev = 0;
   15720 	int phytype = sc->sc_phytype;
   15721 	int rv;
   15722 
   15723 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15724 		device_xname(dev), __func__));
   15725 	KASSERT(sc->sc_type == WM_T_PCH);
   15726 
   15727 	/* Set MDIO slow mode before any other MDIO access */
   15728 	if (phytype == WMPHY_82577)
   15729 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15730 			return rv;
   15731 
   15732 	child = LIST_FIRST(&mii->mii_phys);
   15733 	if (child != NULL)
   15734 		phyrev = child->mii_mpd_rev;
   15735 
	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
   15737 	if ((child != NULL) &&
   15738 	    (((phytype == WMPHY_82577) && ((phyrev == 1) || (phyrev == 2))) ||
   15739 		((phytype == WMPHY_82578) && (phyrev == 1)))) {
   15740 		/* Disable generation of early preamble (0x4431) */
   15741 		rv = mii->mii_readreg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15742 		    &phy_data);
   15743 		if (rv != 0)
   15744 			return rv;
   15745 		phy_data &= ~(BM_RATE_ADAPTATION_CTRL_RX_RXDV_PRE |
   15746 		    BM_RATE_ADAPTATION_CTRL_RX_CRS_PRE);
   15747 		rv = mii->mii_writereg(dev, 2, BM_RATE_ADAPTATION_CTRL,
   15748 		    phy_data);
   15749 		if (rv != 0)
   15750 			return rv;
   15751 
   15752 		/* Preamble tuning for SSC */
   15753 		rv = mii->mii_writereg(dev, 2, HV_KMRN_FIFO_CTRLSTA, 0xa204);
   15754 		if (rv != 0)
   15755 			return rv;
   15756 	}
   15757 
   15758 	/* 82578 */
   15759 	if (phytype == WMPHY_82578) {
   15760 		/*
   15761 		 * Return registers to default by doing a soft reset then
   15762 		 * writing 0x3140 to the control register
   15763 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15764 		 */
   15765 		if ((child != NULL) && (phyrev < 2)) {
   15766 			PHY_RESET(child);
   15767 			rv = mii->mii_writereg(dev, 2, MII_BMCR, 0x3140);
   15768 			if (rv != 0)
   15769 				return rv;
   15770 		}
   15771 	}
   15772 
   15773 	/* Select page 0 */
   15774 	if ((rv = sc->phy.acquire(sc)) != 0)
   15775 		return rv;
   15776 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15777 	sc->phy.release(sc);
   15778 	if (rv != 0)
   15779 		return rv;
   15780 
   15781 	/*
   15782 	 * Configure the K1 Si workaround during phy reset assuming there is
   15783 	 * link so that it disables K1 if link is in 1Gbps.
   15784 	 */
   15785 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15786 		return rv;
   15787 
   15788 	/* Workaround for link disconnects on a busy hub in half duplex */
   15789 	rv = sc->phy.acquire(sc);
   15790 	if (rv)
   15791 		return rv;
   15792 	rv = sc->phy.readreg_locked(dev, 2, BM_PORT_GEN_CFG, &phy_data);
   15793 	if (rv)
   15794 		goto release;
   15795 	rv = sc->phy.writereg_locked(dev, 2, BM_PORT_GEN_CFG,
   15796 	    phy_data & 0x00ff);
   15797 	if (rv)
   15798 		goto release;
   15799 
   15800 	/* Set MSE higher to enable link to stay up when noise is high */
   15801 	rv = wm_write_emi_reg_locked(dev, I82577_MSE_THRESHOLD, 0x0034);
   15802 release:
   15803 	sc->phy.release(sc);
   15804 
   15805 	return rv;
   15806 }
   15807 
   15808 /*
   15809  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15810  *  @sc:   pointer to the HW structure
   15811  */
   15812 static void
   15813 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15814 {
   15815 	device_t dev = sc->sc_dev;
   15816 	uint32_t mac_reg;
   15817 	uint16_t i, wuce;
   15818 	int count;
   15819 
   15820 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15821 		device_xname(sc->sc_dev), __func__));
   15822 
   15823 	if (sc->phy.acquire(sc) != 0)
   15824 		return;
   15825 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15826 		goto release;
   15827 
   15828 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15829 	count = wm_rar_count(sc);
   15830 	for (i = 0; i < count; i++) {
   15831 		uint16_t lo, hi;
   15832 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15833 		lo = (uint16_t)(mac_reg & 0xffff);
   15834 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15835 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15836 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15837 
   15838 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15839 		lo = (uint16_t)(mac_reg & 0xffff);
   15840 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15841 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15842 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15843 	}
   15844 
   15845 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15846 
   15847 release:
   15848 	sc->phy.release(sc);
   15849 }
   15850 
   15851 /*
   15852  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15853  *  done after every PHY reset.
   15854  */
   15855 static int
   15856 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15857 {
   15858 	device_t dev = sc->sc_dev;
   15859 	int rv;
   15860 
   15861 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15862 		device_xname(dev), __func__));
   15863 	KASSERT(sc->sc_type == WM_T_PCH2);
   15864 
   15865 	/* Set MDIO slow mode before any other MDIO access */
   15866 	rv = wm_set_mdio_slow_mode_hv(sc);
   15867 	if (rv != 0)
   15868 		return rv;
   15869 
   15870 	rv = sc->phy.acquire(sc);
   15871 	if (rv != 0)
   15872 		return rv;
   15873 	/* Set MSE higher to enable link to stay up when noise is high */
   15874 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_THRESHOLD, 0x0034);
   15875 	if (rv != 0)
   15876 		goto release;
   15877 	/* Drop link after 5 times MSE threshold was reached */
   15878 	rv = wm_write_emi_reg_locked(dev, I82579_MSE_LINK_DOWN, 0x0005);
   15879 release:
   15880 	sc->phy.release(sc);
   15881 
   15882 	return rv;
   15883 }
   15884 
   15885 /**
   15886  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @sc:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Work around the issue by disabling
 *  the de-assertion of the clock request when in 1Gbps mode.
   15892  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15893  *  speeds in order to avoid Tx hangs.
   15894  **/
   15895 static int
   15896 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15897 {
   15898 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15899 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15900 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15901 	uint16_t phyreg;
   15902 
   15903 	if (link && (speed == STATUS_SPEED_1000)) {
		int rv;

		rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
		    &phyreg);
   15907 		if (rv != 0)
   15908 			goto release;
   15909 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15910 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15911 		if (rv != 0)
   15912 			goto release;
   15913 		delay(20);
   15914 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15915 
   15916 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15917 		    &phyreg);
   15918 release:
   15919 		sc->phy.release(sc);
   15920 		return rv;
   15921 	}
   15922 
   15923 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15924 
   15925 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15926 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15927 	    || !link
   15928 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15929 		goto update_fextnvm6;
   15930 
   15931 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15932 
   15933 	/* Clear link status transmit timeout */
   15934 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15935 	if (speed == STATUS_SPEED_100) {
   15936 		/* Set inband Tx timeout to 5x10us for 100Half */
   15937 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15938 
   15939 		/* Do not extend the K1 entry latency for 100Half */
   15940 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15941 	} else {
   15942 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15943 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15944 
   15945 		/* Extend the K1 entry latency for 10 Mbps */
   15946 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15947 	}
   15948 
   15949 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15950 
   15951 update_fextnvm6:
   15952 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15953 	return 0;
   15954 }
   15955 
   15956 /*
   15957  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15958  *  @sc:   pointer to the HW structure
   15959  *  @link: link up bool flag
   15960  *
   15961  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   15962  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
   15963  *  If link is down, the function will restore the default K1 setting located
   15964  *  in the NVM.
   15965  */
   15966 static int
   15967 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15968 {
   15969 	int k1_enable = sc->sc_nvm_k1_enabled;
   15970 
   15971 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15972 		device_xname(sc->sc_dev), __func__));
   15973 
   15974 	if (sc->phy.acquire(sc) != 0)
   15975 		return -1;
   15976 
   15977 	if (link) {
   15978 		k1_enable = 0;
   15979 
   15980 		/* Link stall fix for link up */
   15981 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15982 		    0x0100);
   15983 	} else {
   15984 		/* Link stall fix for link down */
   15985 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15986 		    0x4100);
   15987 	}
   15988 
   15989 	wm_configure_k1_ich8lan(sc, k1_enable);
   15990 	sc->phy.release(sc);
   15991 
   15992 	return 0;
   15993 }
   15994 
   15995 /*
   15996  *  wm_k1_workaround_lv - K1 Si workaround
   15997  *  @sc:   pointer to the HW structure
   15998  *
   15999  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   16000  *  Disable K1 for 1000 and 100 speeds
   16001  */
   16002 static int
   16003 wm_k1_workaround_lv(struct wm_softc *sc)
   16004 {
   16005 	uint32_t reg;
   16006 	uint16_t phyreg;
   16007 	int rv;
   16008 
   16009 	if (sc->sc_type != WM_T_PCH2)
   16010 		return 0;
   16011 
   16012 	/* Set K1 beacon duration based on 10Mbps speed */
   16013 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   16014 	if (rv != 0)
   16015 		return rv;
   16016 
   16017 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   16018 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   16019 		if (phyreg &
   16020 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   16022 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   16023 			    &phyreg);
   16024 			if (rv != 0)
   16025 				return rv;
   16026 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   16027 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   16028 			    phyreg);
   16029 			if (rv != 0)
   16030 				return rv;
   16031 		} else {
   16032 			/* For 10Mbps */
   16033 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   16034 			reg &= ~FEXTNVM4_BEACON_DURATION;
   16035 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   16036 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   16037 		}
   16038 	}
   16039 
   16040 	return 0;
   16041 }
   16042 
   16043 /*
   16044  *  wm_link_stall_workaround_hv - Si workaround
   16045  *  @sc: pointer to the HW structure
   16046  *
   16047  *  This function works around a Si bug where the link partner can get
   16048  *  a link up indication before the PHY does. If small packets are sent
   16049  *  by the link partner they can be placed in the packet buffer without
   16050  *  being properly accounted for by the PHY and will stall preventing
   16051  *  further packets from being received.  The workaround is to clear the
   16052  *  packet buffer after the PHY detects link up.
   16053  */
   16054 static int
   16055 wm_link_stall_workaround_hv(struct wm_softc *sc)
   16056 {
   16057 	uint16_t phyreg;
   16058 
   16059 	if (sc->sc_phytype != WMPHY_82578)
   16060 		return 0;
   16061 
	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   16063 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   16064 	if ((phyreg & BMCR_LOOP) != 0)
   16065 		return 0;
   16066 
   16067 	/* Check if link is up and at 1Gbps */
   16068 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   16069 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16070 	    | BM_CS_STATUS_SPEED_MASK;
   16071 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   16072 		| BM_CS_STATUS_SPEED_1000))
   16073 		return 0;
   16074 
   16075 	delay(200 * 1000);	/* XXX too big */
   16076 
   16077 	/* Flush the packets in the fifo buffer */
   16078 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16079 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   16080 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   16081 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   16082 
   16083 	return 0;
   16084 }
   16085 
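/*
 * Put the HV (82577/82578) PHY's MDIO interface into slow mode.  Some
 * PHY revisions need slow MDIO timing before any other register access
 * is reliable; see wm_hv_phy_workarounds_ich8lan() and
 * wm_phy_is_accessible_pchlan().
 */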
   16086 static int
   16087 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   16088 {
   16089 	int rv;
   16090 	uint16_t reg;
   16091 
   16092 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   16093 	if (rv != 0)
   16094 		return rv;
   16095 
   16096 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   16097 	    reg | HV_KMRN_MDIO_SLOW);
   16098 }
   16099 
   16100 /*
   16101  *  wm_configure_k1_ich8lan - Configure K1 power state
   16102  *  @sc: pointer to the HW structure
   16103  *  @enable: K1 state to configure
   16104  *
   16105  *  Configure the K1 power state based on the provided parameter.
   16106  *  Assumes semaphore already acquired.
   16107  */
   16108 static void
   16109 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   16110 {
   16111 	uint32_t ctrl, ctrl_ext, tmp;
   16112 	uint16_t kmreg;
   16113 	int rv;
   16114 
   16115 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16116 
   16117 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   16118 	if (rv != 0)
   16119 		return;
   16120 
   16121 	if (k1_enable)
   16122 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   16123 	else
   16124 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   16125 
   16126 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   16127 	if (rv != 0)
   16128 		return;
   16129 
   16130 	delay(20);
   16131 
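	/*
	 * Bounce the MAC through a forced-speed cycle (CTRL_FRCSPD plus
	 * CTRL_EXT_SPD_BYPS, then the original values restored) so that
	 * the Kumeran interface picks up the new K1 setting; this mirrors
	 * the sequence used by other e1000-family drivers.
	 */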
   16132 	ctrl = CSR_READ(sc, WMREG_CTRL);
   16133 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   16134 
   16135 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   16136 	tmp |= CTRL_FRCSPD;
   16137 
   16138 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   16139 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   16140 	CSR_WRITE_FLUSH(sc);
   16141 	delay(20);
   16142 
   16143 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   16144 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   16145 	CSR_WRITE_FLUSH(sc);
   16146 	delay(20);
   16149 }
   16150 
   16151 /* special case - for 82575 - need to do manual init ... */
   16152 static void
   16153 wm_reset_init_script_82575(struct wm_softc *sc)
   16154 {
   16155 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * It is the same setup as mentioned in the FreeBSD driver for the
	 * i82575.
   16158 	 */
   16159 
   16160 	/* SerDes configuration via SERDESCTRL */
   16161 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   16162 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   16163 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   16164 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   16165 
   16166 	/* CCM configuration via CCMCTL register */
   16167 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   16168 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   16169 
   16170 	/* PCIe lanes configuration */
   16171 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   16172 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   16173 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   16174 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   16175 
   16176 	/* PCIe PLL Configuration */
   16177 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   16178 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   16179 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   16180 }
   16181 
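/*
 *  wm_reset_mdicnfg_82580 - Restore MDICNFG after reset
 *  @sc: pointer to the HW structure
 *
 *  On 82580 in SGMII mode, re-derive the external/combined MDIO
 *  selection from the CFG3 NVM word of this LAN function and set
 *  MDICNFG accordingly.
 */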
   16182 static void
   16183 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   16184 {
   16185 	uint32_t reg;
   16186 	uint16_t nvmword;
   16187 	int rv;
   16188 
   16189 	if (sc->sc_type != WM_T_82580)
   16190 		return;
   16191 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   16192 		return;
   16193 
   16194 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   16195 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   16196 	if (rv != 0) {
   16197 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   16198 		    __func__);
   16199 		return;
   16200 	}
   16201 
   16202 	reg = CSR_READ(sc, WMREG_MDICNFG);
   16203 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   16204 		reg |= MDICNFG_DEST;
   16205 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   16206 		reg |= MDICNFG_COM_MDIO;
   16207 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16208 }
   16209 
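/* A PHY ID of all zeros or all ones means the PHY did not respond. */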
   16210 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   16211 
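/*
 *  wm_phy_is_accessible_pchlan - Check whether the PHY answers on MDIO
 *  @sc: pointer to the HW structure
 *
 *  Try to read the PHY ID registers; retry in MDIO slow mode on
 *  PCH/PCH2, and on PCH_LPT and newer unforce SMBus mode once the
 *  PHY is known to be reachable.  Assumes semaphore already acquired.
 */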
   16212 static bool
   16213 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   16214 {
   16215 	uint32_t reg;
   16216 	uint16_t id1, id2;
   16217 	int i, rv;
   16218 
   16219 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16220 		device_xname(sc->sc_dev), __func__));
   16221 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   16222 
   16223 	id1 = id2 = 0xffff;
   16224 	for (i = 0; i < 2; i++) {
   16225 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   16226 		    &id1);
   16227 		if ((rv != 0) || MII_INVALIDID(id1))
   16228 			continue;
   16229 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   16230 		    &id2);
   16231 		if ((rv != 0) || MII_INVALIDID(id2))
   16232 			continue;
   16233 		break;
   16234 	}
   16235 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   16236 		goto out;
   16237 
   16238 	/*
   16239 	 * In case the PHY needs to be in mdio slow mode,
   16240 	 * set slow mode and try to get the PHY id again.
   16241 	 */
   16242 	rv = 0;
   16243 	if (sc->sc_type < WM_T_PCH_LPT) {
   16244 		sc->phy.release(sc);
   16245 		wm_set_mdio_slow_mode_hv(sc);
   16246 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   16247 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   16248 		sc->phy.acquire(sc);
   16249 	}
   16250 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
    16251 		device_printf(sc->sc_dev, "PHY is not accessible\n");
   16252 		return false;
   16253 	}
   16254 out:
   16255 	if (sc->sc_type >= WM_T_PCH_LPT) {
   16256 		/* Only unforce SMBus if ME is not active */
   16257 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   16258 			uint16_t phyreg;
   16259 
   16260 			/* Unforce SMBus mode in PHY */
   16261 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   16262 			    CV_SMB_CTRL, &phyreg);
   16263 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   16264 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   16265 			    CV_SMB_CTRL, phyreg);
   16266 
   16267 			/* Unforce SMBus mode in MAC */
   16268 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16269 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   16270 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16271 		}
   16272 	}
   16273 	return true;
   16274 }
   16275 
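/*
 *  wm_toggle_lanphypc_pch_lpt - Toggle the LANPHYPC pin value
 *  @sc: pointer to the HW structure
 *
 *  Toggling LANPHYPC power-cycles the PHY; on PCH_LPT and newer, poll
 *  CTRL_EXT_LPCD for completion instead of a fixed 50 ms wait.
 */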
   16276 static void
   16277 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   16278 {
   16279 	uint32_t reg;
   16280 	int i;
   16281 
   16282 	/* Set PHY Config Counter to 50msec */
   16283 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   16284 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   16285 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   16286 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   16287 
   16288 	/* Toggle LANPHYPC */
   16289 	reg = CSR_READ(sc, WMREG_CTRL);
   16290 	reg |= CTRL_LANPHYPC_OVERRIDE;
   16291 	reg &= ~CTRL_LANPHYPC_VALUE;
   16292 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16293 	CSR_WRITE_FLUSH(sc);
   16294 	delay(1000);
   16295 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   16296 	CSR_WRITE(sc, WMREG_CTRL, reg);
   16297 	CSR_WRITE_FLUSH(sc);
   16298 
    16299 	if (sc->sc_type < WM_T_PCH_LPT) {
    16300 		delay(50 * 1000);
    16301 	} else {
   16302 		i = 20;
   16303 
   16304 		do {
   16305 			delay(5 * 1000);
   16306 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   16307 		    && i--);
   16308 
   16309 		delay(30 * 1000);
   16310 	}
   16311 }
   16312 
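/*
 *  wm_platform_pm_pch_lpt - Set platform power management values
 *  @sc: pointer to the HW structure
 *  @link: whether the link is up
 *
 *  Compute the Latency Tolerance Reporting (LTR) value from the Rx
 *  packet buffer size and link speed, clamp it to what the platform
 *  advertises, and program the OBFF high water mark to match.
 */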
   16313 static int
   16314 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   16315 {
   16316 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   16317 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   16318 	uint32_t rxa;
   16319 	uint16_t scale = 0, lat_enc = 0;
   16320 	int32_t obff_hwm = 0;
   16321 	int64_t lat_ns, value;
   16322 
   16323 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16324 		device_xname(sc->sc_dev), __func__));
   16325 
   16326 	if (link) {
   16327 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   16328 		uint32_t status;
   16329 		uint16_t speed;
   16330 		pcireg_t preg;
   16331 
   16332 		status = CSR_READ(sc, WMREG_STATUS);
   16333 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   16334 		case STATUS_SPEED_10:
   16335 			speed = 10;
   16336 			break;
   16337 		case STATUS_SPEED_100:
   16338 			speed = 100;
   16339 			break;
   16340 		case STATUS_SPEED_1000:
   16341 			speed = 1000;
   16342 			break;
   16343 		default:
   16344 			device_printf(sc->sc_dev, "Unknown speed "
   16345 			    "(status = %08x)\n", status);
   16346 			return -1;
   16347 		}
   16348 
   16349 		/* Rx Packet Buffer Allocation size (KB) */
   16350 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   16351 
   16352 		/*
   16353 		 * Determine the maximum latency tolerated by the device.
   16354 		 *
   16355 		 * Per the PCIe spec, the tolerated latencies are encoded as
   16356 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   16357 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   16358 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   16359 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   16360 		 */
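		/*
		 * lat_ns below is the time, in ns, to fill the Rx
		 * packet buffer, less two maximum-sized frames, at the
		 * current link speed (rxa is in KB, speed in Mb/s).
		 */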
   16361 		lat_ns = ((int64_t)rxa * 1024 -
   16362 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   16363 			+ ETHER_HDR_LEN))) * 8 * 1000;
   16364 		if (lat_ns < 0)
   16365 			lat_ns = 0;
   16366 		else
   16367 			lat_ns /= speed;
   16368 		value = lat_ns;
   16369 
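		/*
		 * Worked example (illustrative, assuming LTRV_VALUE is
		 * 1023, the 10-bit maximum): lat_ns = 66000 exits the
		 * loop with scale = 2 and value = 65, since
		 * ceil(ceil(66000/32)/32) = 65 <= 1023; the encoding
		 * then represents 65 * 2^10 ns ~= 66.6 us.
		 */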
   16370 		while (value > LTRV_VALUE) {
    16371 			scale++;
   16372 			value = howmany(value, __BIT(5));
   16373 		}
   16374 		if (scale > LTRV_SCALE_MAX) {
   16375 			device_printf(sc->sc_dev,
   16376 			    "Invalid LTR latency scale %d\n", scale);
   16377 			return -1;
   16378 		}
   16379 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   16380 
   16381 		/* Determine the maximum latency tolerated by the platform */
   16382 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16383 		    WM_PCI_LTR_CAP_LPT);
   16384 		max_snoop = preg & 0xffff;
   16385 		max_nosnoop = preg >> 16;
   16386 
   16387 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   16388 
   16389 		if (lat_enc > max_ltr_enc) {
   16390 			lat_enc = max_ltr_enc;
   16391 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   16392 			    * PCI_LTR_SCALETONS(
   16393 				    __SHIFTOUT(lat_enc,
   16394 					PCI_LTR_MAXSNOOPLAT_SCALE));
   16395 		}
   16396 
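		/*
		 * Convert the tolerated latency back into the amount of
		 * Rx buffer (in KB) consumed at link speed over that
		 * time; the OBFF high water mark is the headroom left.
		 */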
   16397 		if (lat_ns) {
   16398 			lat_ns *= speed * 1000;
   16399 			lat_ns /= 8;
   16400 			lat_ns /= 1000000000;
   16401 			obff_hwm = (int32_t)(rxa - lat_ns);
   16402 		}
   16403 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    16404 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   16405 			    "(rxa = %d, lat_ns = %d)\n",
   16406 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   16407 			return -1;
   16408 		}
   16409 	}
    16410 	/* Use the same latency for Snoop and No-Snoop */
   16411 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   16412 	CSR_WRITE(sc, WMREG_LTRV, reg);
   16413 
   16414 	/* Set OBFF high water mark */
   16415 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   16416 	reg |= obff_hwm;
   16417 	CSR_WRITE(sc, WMREG_SVT, reg);
   16418 
   16419 	/* Enable OBFF */
   16420 	reg = CSR_READ(sc, WMREG_SVCR);
   16421 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   16422 	CSR_WRITE(sc, WMREG_SVCR, reg);
   16423 
   16424 	return 0;
   16425 }
   16426 
   16427 /*
   16428  * I210 Errata 25 and I211 Errata 10
   16429  * Slow System Clock.
   16430  */
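/*
 * If the PHY's PLL may have failed to lock because of the slow system
 * clock, repeatedly reset the PHY with the PLL disabled (CTRL_EXT_SDLPE)
 * and bounce the function through D3hot until GS40G_PHY_PLL_FREQ_REG no
 * longer reads back as unconfigured.
 */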
   16431 static int
   16432 wm_pll_workaround_i210(struct wm_softc *sc)
   16433 {
   16434 	uint32_t mdicnfg, wuc;
   16435 	uint32_t reg;
   16436 	pcireg_t pcireg;
   16437 	uint32_t pmreg;
   16438 	uint16_t nvmword, tmp_nvmword;
   16439 	uint16_t phyval;
   16440 	bool wa_done = false;
   16441 	int i, rv = 0;
   16442 
   16443 	/* Get Power Management cap offset */
   16444 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   16445 	    &pmreg, NULL) == 0)
   16446 		return -1;
   16447 
   16448 	/* Save WUC and MDICNFG registers */
   16449 	wuc = CSR_READ(sc, WMREG_WUC);
   16450 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   16451 
   16452 	reg = mdicnfg & ~MDICNFG_DEST;
   16453 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   16454 
   16455 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   16456 		nvmword = INVM_DEFAULT_AL;
   16457 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   16458 
   16459 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   16460 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   16461 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   16462 
   16463 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   16464 			rv = 0;
   16465 			break; /* OK */
   16466 		} else
   16467 			rv = -1;
   16468 
   16469 		wa_done = true;
   16470 		/* Directly reset the internal PHY */
   16471 		reg = CSR_READ(sc, WMREG_CTRL);
   16472 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   16473 
   16474 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   16475 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   16476 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   16477 
   16478 		CSR_WRITE(sc, WMREG_WUC, 0);
   16479 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   16480 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16481 
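		/* Bounce the function through D3hot and back to D0. */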
   16482 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   16483 		    pmreg + PCI_PMCSR);
   16484 		pcireg |= PCI_PMCSR_STATE_D3;
   16485 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16486 		    pmreg + PCI_PMCSR, pcireg);
   16487 		delay(1000);
   16488 		pcireg &= ~PCI_PMCSR_STATE_D3;
   16489 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   16490 		    pmreg + PCI_PMCSR, pcireg);
   16491 
   16492 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   16493 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   16494 
   16495 		/* Restore WUC register */
   16496 		CSR_WRITE(sc, WMREG_WUC, wuc);
   16497 	}
   16498 
   16499 	/* Restore MDICNFG setting */
   16500 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   16501 	if (wa_done)
   16502 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   16503 	return rv;
   16504 }
   16505 
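/*
 * Quirk for legacy (INTx) interrupts on PCH_SPT/PCH_CNP: ungate the
 * side clock and disable IOSF sideband clock gating and clock requests,
 * apparently so interrupts are not lost while clocks are gated.
 */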
   16506 static void
   16507 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   16508 {
   16509 	uint32_t reg;
   16510 
   16511 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   16512 		device_xname(sc->sc_dev), __func__));
   16513 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   16514 	    || (sc->sc_type == WM_T_PCH_CNP));
   16515 
   16516 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   16517 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   16518 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   16519 
   16520 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   16521 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   16522 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   16523 }
   16524