/*	$NetBSD: if_wm.c,v 1.620 2019/01/25 08:04:07 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.620 2019/01/25 08:04:07 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	__nothing
#endif /* WM_DEBUG */
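
/*
 * Example call site (illustrative only): DPRINTF() takes a debug-class
 * bitmask and a parenthesized printf-style argument list, e.g.:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: link is %s\n",
 *	    device_xname(sc->sc_dev), link ? "up" : "down"));
 *
 * The message is printed only when the corresponding bit is set in
 * wm_debug; without WM_DEBUG, DPRINTF() expands to nothing.
 */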

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
 * m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
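
/*
 * Illustrative sketch (not driver code): because WM_NTXDESC() and
 * WM_TXQUEUELEN() are powers of two, the "& (n - 1)" masks above give
 * cheap modular wrap-around when walking the ring, e.g.:
 *
 *	int next = WM_NEXTTX(txq, WM_NTXDESC(txq) - 1);
 *
 * yields 0 ((4095 + 1) & 4095 on >= 82544), wrapping to ring entry 0.
 */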

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
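
/*
 * Worked example (assuming a 9018 byte jumbo frame): each buffer holds
 * MCLBYTES (2048) bytes, so one such frame spans howmany(9018, 2048) == 5
 * descriptors, and WM_NRXDESC / 5 == 51 leaves room for roughly the 50
 * jumbo packets quoted above.
 */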

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
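
/*
 * Illustrative expansion (not compiled): WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares the member pair
 *
 *	char txq_txdw_evcnt_name[...];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, xname) then formats the
 * counter name "txq00txdw" into the buffer and registers it with
 * evcnt_attach_dynamic(9).
 */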

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This pcq intermediates them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped(toomany DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
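
/*
 * A minimal sketch of how these operation tables are meant to be used
 * (illustrative only; error handling elided, and "offset" stands for any
 * valid NVM word offset).  Chip-specific attach code fills in the function
 * pointers once, after which callers follow a single
 * acquire/access/release pattern:
 */
#if 0
	uint16_t word;

	if (sc->nvm.acquire(sc) == 0) {
		sc->nvm.read(sc, offset, 1, &word);
		sc->nvm.release(sc);
	}
#endif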

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
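
/*
 * Typical usage (illustrative only): code paths that touch shared softc
 * state bracket themselves with WM_CORE_LOCK()/WM_CORE_UNLOCK(), and
 * internal helpers assert ownership.  Note the macros are no-ops when
 * sc_core_lock is NULL.
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify sc_if_flags, the multicast filter, etc. ... */
	WM_CORE_UNLOCK(sc);
#endif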

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
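
/*
 * Sketch of the intended use (illustrative only; "m" is the mbuf for the
 * descriptor just completed and "lastfrag" is a hypothetical flag marking
 * the final descriptor of a packet): fragments are appended with
 * WM_RXCHAIN_LINK(), and when the packet is complete the head is taken
 * and the chain is reset for the next packet.
 */
#if 0
	WM_RXCHAIN_LINK(rxq, m);
	if (lastfrag) {
		struct mbuf *m0 = rxq->rxq_head;
		WM_RXCHAIN_RESET(rxq);
		/* ... hand m0 up the network stack ... */
	}
#endif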

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
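
/*
 * CSR_WRITE_FLUSH() forces posted PCI writes to reach the chip by reading
 * a harmless register (STATUS).  A typical pattern (illustrative only):
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);
	delay(10);
#endif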

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
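
/*
 * Worked example (illustrative): with a 64-bit bus_addr_t and a Tx ring
 * based at 0x123456000, WM_CDTXADDR_LO(txq, 0) == 0x23456000 and
 * WM_CDTXADDR_HI(txq, 0) == 0x1; on platforms with a 32-bit bus_addr_t
 * the high half is always 0.
 */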

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static int	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint16_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82543_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_mdic_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_mdic_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_i82544_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_i80003_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_bm_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_bm_writereg(device_t, int, int, uint16_t);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_gmii_hv_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_82580_writereg(device_t, int, int, uint16_t);
static int	wm_gmii_gs40g_readreg(device_t, int, int, uint16_t *);
static int	wm_gmii_gs40g_writereg(device_t, int, int, uint16_t);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int, uint16_t *);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static int	wm_sgmii_writereg(device_t, int, int, uint16_t);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
 */
static int	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static int	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static int	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static int	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static int	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1322 	  "82567LM-3 LAN Controller",
   1323 	  WM_T_ICH10,		WMP_F_COPPER },
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1325 	  "82567LF-3 LAN Controller",
   1326 	  WM_T_ICH10,		WMP_F_COPPER },
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1328 	  "82567V-2 LAN Controller",
   1329 	  WM_T_ICH10,		WMP_F_COPPER },
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1331 	  "82567V-3? LAN Controller",
   1332 	  WM_T_ICH10,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1334 	  "HANKSVILLE LAN Controller",
   1335 	  WM_T_ICH10,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1337 	  "PCH LAN (82577LM) Controller",
   1338 	  WM_T_PCH,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1340 	  "PCH LAN (82577LC) Controller",
   1341 	  WM_T_PCH,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1343 	  "PCH LAN (82578DM) Controller",
   1344 	  WM_T_PCH,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1346 	  "PCH LAN (82578DC) Controller",
   1347 	  WM_T_PCH,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1349 	  "PCH2 LAN (82579LM) Controller",
   1350 	  WM_T_PCH2,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1352 	  "PCH2 LAN (82579V) Controller",
   1353 	  WM_T_PCH2,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1355 	  "82575EB dual-1000baseT Ethernet",
   1356 	  WM_T_82575,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1358 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1359 	  WM_T_82575,		WMP_F_SERDES },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1361 	  "82575GB quad-1000baseT Ethernet",
   1362 	  WM_T_82575,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1364 	  "82575GB quad-1000baseT Ethernet (PM)",
   1365 	  WM_T_82575,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1367 	  "82576 1000BaseT Ethernet",
   1368 	  WM_T_82576,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1370 	  "82576 1000BaseX Ethernet",
   1371 	  WM_T_82576,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1374 	  "82576 gigabit Ethernet (SERDES)",
   1375 	  WM_T_82576,		WMP_F_SERDES },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1378 	  "82576 quad-1000BaseT Ethernet",
   1379 	  WM_T_82576,		WMP_F_COPPER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1382 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1383 	  WM_T_82576,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1386 	  "82576 gigabit Ethernet",
   1387 	  WM_T_82576,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1390 	  "82576 gigabit Ethernet (SERDES)",
   1391 	  WM_T_82576,		WMP_F_SERDES },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1393 	  "82576 quad-gigabit Ethernet (SERDES)",
   1394 	  WM_T_82576,		WMP_F_SERDES },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1397 	  "82580 1000BaseT Ethernet",
   1398 	  WM_T_82580,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1400 	  "82580 1000BaseX Ethernet",
   1401 	  WM_T_82580,		WMP_F_FIBER },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1404 	  "82580 1000BaseT Ethernet (SERDES)",
   1405 	  WM_T_82580,		WMP_F_SERDES },
   1406 
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1408 	  "82580 gigabit Ethernet (SGMII)",
   1409 	  WM_T_82580,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1411 	  "82580 dual-1000BaseT Ethernet",
   1412 	  WM_T_82580,		WMP_F_COPPER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1415 	  "82580 quad-1000BaseX Ethernet",
   1416 	  WM_T_82580,		WMP_F_FIBER },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1419 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1420 	  WM_T_82580,		WMP_F_COPPER },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1423 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1424 	  WM_T_82580,		WMP_F_SERDES },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1427 	  "DH89XXCC 1000BASE-KX Ethernet",
   1428 	  WM_T_82580,		WMP_F_SERDES },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1431 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1432 	  WM_T_82580,		WMP_F_SERDES },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1435 	  "I350 Gigabit Network Connection",
   1436 	  WM_T_I350,		WMP_F_COPPER },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1439 	  "I350 Gigabit Fiber Network Connection",
   1440 	  WM_T_I350,		WMP_F_FIBER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1443 	  "I350 Gigabit Backplane Connection",
   1444 	  WM_T_I350,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1447 	  "I350 Quad Port Gigabit Ethernet",
   1448 	  WM_T_I350,		WMP_F_SERDES },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1451 	  "I350 Gigabit Connection",
   1452 	  WM_T_I350,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1455 	  "I354 Gigabit Ethernet (KX)",
   1456 	  WM_T_I354,		WMP_F_SERDES },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1459 	  "I354 Gigabit Ethernet (SGMII)",
   1460 	  WM_T_I354,		WMP_F_COPPER },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1463 	  "I354 Gigabit Ethernet (2.5G)",
   1464 	  WM_T_I354,		WMP_F_COPPER },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1467 	  "I210-T1 Ethernet Server Adapter",
   1468 	  WM_T_I210,		WMP_F_COPPER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1471 	  "I210 Ethernet (Copper OEM)",
   1472 	  WM_T_I210,		WMP_F_COPPER },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1475 	  "I210 Ethernet (Copper IT)",
   1476 	  WM_T_I210,		WMP_F_COPPER },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1479 	  "I210 Ethernet (FLASH less)",
   1480 	  WM_T_I210,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1483 	  "I210 Gigabit Ethernet (Fiber)",
   1484 	  WM_T_I210,		WMP_F_FIBER },
   1485 
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1487 	  "I210 Gigabit Ethernet (SERDES)",
   1488 	  WM_T_I210,		WMP_F_SERDES },
   1489 
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1491 	  "I210 Gigabit Ethernet (FLASH less)",
   1492 	  WM_T_I210,		WMP_F_SERDES },
   1493 
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1495 	  "I210 Gigabit Ethernet (SGMII)",
   1496 	  WM_T_I210,		WMP_F_COPPER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1499 	  "I211 Ethernet (COPPER)",
   1500 	  WM_T_I211,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1502 	  "I217 V Ethernet Connection",
   1503 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1505 	  "I217 LM Ethernet Connection",
   1506 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1508 	  "I218 V Ethernet Connection",
   1509 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1511 	  "I218 V Ethernet Connection",
   1512 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1514 	  "I218 V Ethernet Connection",
   1515 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1517 	  "I218 LM Ethernet Connection",
   1518 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1520 	  "I218 LM Ethernet Connection",
   1521 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1523 	  "I218 LM Ethernet Connection",
   1524 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1526 	  "I219 V Ethernet Connection",
   1527 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1529 	  "I219 V Ethernet Connection",
   1530 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1532 	  "I219 V Ethernet Connection",
   1533 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1535 	  "I219 V Ethernet Connection",
   1536 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1538 	  "I219 LM Ethernet Connection",
   1539 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1544 	  "I219 LM Ethernet Connection",
   1545 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1547 	  "I219 LM Ethernet Connection",
   1548 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1550 	  "I219 LM Ethernet Connection",
   1551 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1553 	  "I219 V Ethernet Connection",
   1554 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1556 	  "I219 V Ethernet Connection",
   1557 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1559 	  "I219 LM Ethernet Connection",
   1560 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1562 	  "I219 LM Ethernet Connection",
   1563 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1564 	{ 0,			0,
   1565 	  NULL,
   1566 	  0,			0 },
   1567 };
   1568 
   1569 /*
   1570  * Register read/write functions.
   1571  * Other than CSR_{READ|WRITE}().
   1572  */
   1573 
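/*
 * Indirect I/O access uses an address/data window pair in the I/O BAR:
 * the register offset is first written to the address window at offset
 * 0, then the data is read from or written to the data window at
 * offset 4.  So wm_io_write(sc, reg, val) below performs two
 * bus_space_write_4() calls: one for the offset, one for the value.
 */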
   1574 #if 0 /* Not currently used */
   1575 static inline uint32_t
   1576 wm_io_read(struct wm_softc *sc, int reg)
   1577 {
   1578 
   1579 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1580 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1581 }
   1582 #endif
   1583 
   1584 static inline void
   1585 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1586 {
   1587 
   1588 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1589 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1590 }
   1591 
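/*
 * Write one of the 82575's indirect 8-bit controller registers: pack
 * the data and the register offset into a single command word, write
 * it to `reg', then poll (5us per iteration, up to
 * SCTL_CTL_POLL_TIMEOUT tries) until the chip sets the READY bit to
 * acknowledge the access.
 */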
   1592 static inline void
   1593 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1594     uint32_t data)
   1595 {
   1596 	uint32_t regval;
   1597 	int i;
   1598 
   1599 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1600 
   1601 	CSR_WRITE(sc, reg, regval);
   1602 
   1603 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1604 		delay(5);
   1605 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1606 			break;
   1607 	}
   1608 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1609 		aprint_error("%s: WARNING:"
   1610 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1611 		    device_xname(sc->sc_dev), reg);
   1612 	}
   1613 }
   1614 
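/*
 * Split a DMA address into the two little-endian 32-bit halves of a
 * wiseman descriptor address.  The high word is meaningful only when
 * bus_addr_t is 64 bits wide; otherwise it is zeroed.
 */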
   1615 static inline void
   1616 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1617 {
   1618 	wa->wa_low = htole32(v & 0xffffffffU);
   1619 	if (sizeof(bus_addr_t) == 8)
   1620 		wa->wa_high = htole32((uint64_t) v >> 32);
   1621 	else
   1622 		wa->wa_high = 0;
   1623 }
   1624 
   1625 /*
   1626  * Descriptor sync/init functions.
   1627  */
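/*
 * wm_cdtxsync() syncs `num' descriptors starting at `start'.  Because
 * the descriptor ring is circular, a range running past the end of the
 * ring is split into two bus_dmamap_sync() calls: one for the tail of
 * the ring and one for the wrapped portion at the head.
 */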
   1628 static inline void
   1629 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1630 {
   1631 	struct wm_softc *sc = txq->txq_sc;
   1632 
   1633 	/* If it will wrap around, sync to the end of the ring. */
   1634 	if ((start + num) > WM_NTXDESC(txq)) {
   1635 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1636 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1637 		    (WM_NTXDESC(txq) - start), ops);
   1638 		num -= (WM_NTXDESC(txq) - start);
   1639 		start = 0;
   1640 	}
   1641 
   1642 	/* Now sync whatever is left. */
   1643 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1644 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1645 }
   1646 
   1647 static inline void
   1648 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1649 {
   1650 	struct wm_softc *sc = rxq->rxq_sc;
   1651 
   1652 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1653 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1654 }
   1655 
   1656 static inline void
   1657 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1658 {
   1659 	struct wm_softc *sc = rxq->rxq_sc;
   1660 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1661 	struct mbuf *m = rxs->rxs_mbuf;
   1662 
   1663 	/*
   1664 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1665 	 * so that the payload after the Ethernet header is aligned
   1666 	 * to a 4-byte boundary.
    1667 	 *
   1668 	 * XXX BRAINDAMAGE ALERT!
   1669 	 * The stupid chip uses the same size for every buffer, which
   1670 	 * is set in the Receive Control register.  We are using the 2K
   1671 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1672 	 * reason, we can't "scoot" packets longer than the standard
   1673 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1674 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1675 	 * the upper layer copy the headers.
   1676 	 */
   1677 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1678 
   1679 	if (sc->sc_type == WM_T_82574) {
   1680 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1681 		rxd->erx_data.erxd_addr =
   1682 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1683 		rxd->erx_data.erxd_dd = 0;
   1684 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1685 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1686 
   1687 		rxd->nqrx_data.nrxd_paddr =
   1688 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1689 		/* Currently, split header is not supported. */
   1690 		rxd->nqrx_data.nrxd_haddr = 0;
   1691 	} else {
   1692 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1693 
   1694 		wm_set_dma_addr(&rxd->wrx_addr,
   1695 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1696 		rxd->wrx_len = 0;
   1697 		rxd->wrx_cksum = 0;
   1698 		rxd->wrx_status = 0;
   1699 		rxd->wrx_errors = 0;
   1700 		rxd->wrx_special = 0;
   1701 	}
   1702 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1703 
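	/*
	 * Advance the receive descriptor tail (RDT); this hands the
	 * newly initialized descriptor back to the hardware.
	 */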
   1704 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1705 }
   1706 
   1707 /*
   1708  * Device driver interface functions and commonly used functions.
   1709  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1710  */
   1711 
    1712 /* Look up a device in the supported device table */
   1713 static const struct wm_product *
   1714 wm_lookup(const struct pci_attach_args *pa)
   1715 {
   1716 	const struct wm_product *wmp;
   1717 
   1718 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1719 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1720 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1721 			return wmp;
   1722 	}
   1723 	return NULL;
   1724 }
   1725 
   1726 /* The match function (ca_match) */
   1727 static int
   1728 wm_match(device_t parent, cfdata_t cf, void *aux)
   1729 {
   1730 	struct pci_attach_args *pa = aux;
   1731 
   1732 	if (wm_lookup(pa) != NULL)
   1733 		return 1;
   1734 
   1735 	return 0;
   1736 }
   1737 
   1738 /* The attach function (ca_attach) */
   1739 static void
   1740 wm_attach(device_t parent, device_t self, void *aux)
   1741 {
   1742 	struct wm_softc *sc = device_private(self);
   1743 	struct pci_attach_args *pa = aux;
   1744 	prop_dictionary_t dict;
   1745 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1746 	pci_chipset_tag_t pc = pa->pa_pc;
   1747 	int counts[PCI_INTR_TYPE_SIZE];
   1748 	pci_intr_type_t max_type;
   1749 	const char *eetype, *xname;
   1750 	bus_space_tag_t memt;
   1751 	bus_space_handle_t memh;
   1752 	bus_size_t memsize;
   1753 	int memh_valid;
   1754 	int i, error;
   1755 	const struct wm_product *wmp;
   1756 	prop_data_t ea;
   1757 	prop_number_t pn;
   1758 	uint8_t enaddr[ETHER_ADDR_LEN];
   1759 	char buf[256];
   1760 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1761 	pcireg_t preg, memtype;
   1762 	uint16_t eeprom_data, apme_mask;
   1763 	bool force_clear_smbi;
   1764 	uint32_t link_mode;
   1765 	uint32_t reg;
   1766 
   1767 	sc->sc_dev = self;
   1768 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1769 	sc->sc_core_stopping = false;
   1770 
   1771 	wmp = wm_lookup(pa);
   1772 #ifdef DIAGNOSTIC
   1773 	if (wmp == NULL) {
   1774 		printf("\n");
   1775 		panic("wm_attach: impossible");
   1776 	}
   1777 #endif
   1778 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1779 
   1780 	sc->sc_pc = pa->pa_pc;
   1781 	sc->sc_pcitag = pa->pa_tag;
   1782 
   1783 	if (pci_dma64_available(pa))
   1784 		sc->sc_dmat = pa->pa_dmat64;
   1785 	else
   1786 		sc->sc_dmat = pa->pa_dmat;
   1787 
   1788 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1789 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1790 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1791 
   1792 	sc->sc_type = wmp->wmp_type;
   1793 
   1794 	/* Set default function pointers */
   1795 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1796 	sc->phy.release = sc->nvm.release = wm_put_null;
   1797 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1798 
   1799 	if (sc->sc_type < WM_T_82543) {
   1800 		if (sc->sc_rev < 2) {
   1801 			aprint_error_dev(sc->sc_dev,
   1802 			    "i82542 must be at least rev. 2\n");
   1803 			return;
   1804 		}
   1805 		if (sc->sc_rev < 3)
   1806 			sc->sc_type = WM_T_82542_2_0;
   1807 	}
   1808 
   1809 	/*
   1810 	 * Disable MSI for Errata:
   1811 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1812 	 *
   1813 	 *  82544: Errata 25
   1814 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1815 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1816 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1817 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1818 	 *
   1819 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1820 	 *
   1821 	 *  82571 & 82572: Errata 63
   1822 	 */
   1823 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1824 	    || (sc->sc_type == WM_T_82572))
   1825 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1826 
   1827 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1828 	    || (sc->sc_type == WM_T_82580)
   1829 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1830 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1831 		sc->sc_flags |= WM_F_NEWQUEUE;
   1832 
   1833 	/* Set device properties (mactype) */
   1834 	dict = device_properties(sc->sc_dev);
   1835 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1836 
   1837 	/*
    1838 	 * Map the device.  All devices support memory-mapped access,
   1839 	 * and it is really required for normal operation.
   1840 	 */
   1841 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1842 	switch (memtype) {
   1843 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1844 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1845 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1846 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1847 		break;
   1848 	default:
   1849 		memh_valid = 0;
   1850 		break;
   1851 	}
   1852 
   1853 	if (memh_valid) {
   1854 		sc->sc_st = memt;
   1855 		sc->sc_sh = memh;
   1856 		sc->sc_ss = memsize;
   1857 	} else {
   1858 		aprint_error_dev(sc->sc_dev,
   1859 		    "unable to map device registers\n");
   1860 		return;
   1861 	}
   1862 
   1863 	/*
   1864 	 * In addition, i82544 and later support I/O mapped indirect
   1865 	 * register access.  It is not desirable (nor supported in
   1866 	 * this driver) to use it for normal operation, though it is
   1867 	 * required to work around bugs in some chip versions.
   1868 	 */
   1869 	if (sc->sc_type >= WM_T_82544) {
   1870 		/* First we have to find the I/O BAR. */
   1871 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1872 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1873 			if (memtype == PCI_MAPREG_TYPE_IO)
   1874 				break;
   1875 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1876 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1877 				i += 4;	/* skip high bits, too */
   1878 		}
   1879 		if (i < PCI_MAPREG_END) {
   1880 			/*
    1881 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1882 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1883 			 * That's not a problem, because newer chips don't
    1884 			 * have this bug.
   1885 			 *
   1886 			 * The i8254x doesn't apparently respond when the
   1887 			 * I/O BAR is 0, which looks somewhat like it's not
   1888 			 * been configured.
   1889 			 */
   1890 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1891 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1892 				aprint_error_dev(sc->sc_dev,
   1893 				    "WARNING: I/O BAR at zero.\n");
   1894 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1895 					0, &sc->sc_iot, &sc->sc_ioh,
   1896 					NULL, &sc->sc_ios) == 0) {
   1897 				sc->sc_flags |= WM_F_IOH_VALID;
   1898 			} else
   1899 				aprint_error_dev(sc->sc_dev,
   1900 				    "WARNING: unable to map I/O space\n");
   1901 		}
   1902 
   1903 	}
   1904 
   1905 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1906 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1907 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1908 	if (sc->sc_type < WM_T_82542_2_1)
   1909 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1910 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1911 
   1912 	/* power up chip */
   1913 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1914 	    && error != EOPNOTSUPP) {
   1915 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1916 		return;
   1917 	}
   1918 
   1919 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1920 	/*
    1921 	 * Don't use MSI-X if we can use only one queue, in order to
    1922 	 * save interrupt resources.
   1923 	 */
   1924 	if (sc->sc_nqueues > 1) {
   1925 		max_type = PCI_INTR_TYPE_MSIX;
   1926 		/*
    1927 		 * The 82583 has an MSI-X capability in the PCI configuration
    1928 		 * space, but it doesn't actually support it. At least the
    1929 		 * documentation doesn't say anything about MSI-X.
   1930 		 */
   1931 		counts[PCI_INTR_TYPE_MSIX]
   1932 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1933 	} else {
   1934 		max_type = PCI_INTR_TYPE_MSI;
   1935 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1936 	}
   1937 
   1938 	/* Allocation settings */
   1939 	counts[PCI_INTR_TYPE_MSI] = 1;
   1940 	counts[PCI_INTR_TYPE_INTX] = 1;
   1941 	/* overridden by disable flags */
   1942 	if (wm_disable_msi != 0) {
   1943 		counts[PCI_INTR_TYPE_MSI] = 0;
   1944 		if (wm_disable_msix != 0) {
   1945 			max_type = PCI_INTR_TYPE_INTX;
   1946 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1947 		}
   1948 	} else if (wm_disable_msix != 0) {
   1949 		max_type = PCI_INTR_TYPE_MSI;
   1950 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1951 	}
   1952 
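	/*
	 * Allocate interrupts with a fallback chain: if MSI-X setup
	 * fails, release the vectors and retry with MSI; if MSI setup
	 * fails, retry with INTx.
	 */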
   1953 alloc_retry:
   1954 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1955 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1956 		return;
   1957 	}
   1958 
   1959 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1960 		error = wm_setup_msix(sc);
   1961 		if (error) {
   1962 			pci_intr_release(pc, sc->sc_intrs,
   1963 			    counts[PCI_INTR_TYPE_MSIX]);
   1964 
   1965 			/* Setup for MSI: Disable MSI-X */
   1966 			max_type = PCI_INTR_TYPE_MSI;
   1967 			counts[PCI_INTR_TYPE_MSI] = 1;
   1968 			counts[PCI_INTR_TYPE_INTX] = 1;
   1969 			goto alloc_retry;
   1970 		}
   1971 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1972 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1973 		error = wm_setup_legacy(sc);
   1974 		if (error) {
   1975 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1976 			    counts[PCI_INTR_TYPE_MSI]);
   1977 
   1978 			/* The next try is for INTx: Disable MSI */
   1979 			max_type = PCI_INTR_TYPE_INTX;
   1980 			counts[PCI_INTR_TYPE_INTX] = 1;
   1981 			goto alloc_retry;
   1982 		}
   1983 	} else {
   1984 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1985 		error = wm_setup_legacy(sc);
   1986 		if (error) {
   1987 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1988 			    counts[PCI_INTR_TYPE_INTX]);
   1989 			return;
   1990 		}
   1991 	}
   1992 
   1993 	/*
   1994 	 * Check the function ID (unit number of the chip).
   1995 	 */
   1996 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1997 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1998 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1999 	    || (sc->sc_type == WM_T_82580)
   2000 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2001 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2002 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2003 	else
   2004 		sc->sc_funcid = 0;
   2005 
   2006 	/*
   2007 	 * Determine a few things about the bus we're connected to.
   2008 	 */
   2009 	if (sc->sc_type < WM_T_82543) {
   2010 		/* We don't really know the bus characteristics here. */
   2011 		sc->sc_bus_speed = 33;
   2012 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2013 		/*
    2014 		 * CSA (Communication Streaming Architecture) is about as
    2015 		 * fast as a 32-bit 66MHz PCI bus.
   2016 		 */
   2017 		sc->sc_flags |= WM_F_CSA;
   2018 		sc->sc_bus_speed = 66;
   2019 		aprint_verbose_dev(sc->sc_dev,
   2020 		    "Communication Streaming Architecture\n");
   2021 		if (sc->sc_type == WM_T_82547) {
   2022 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2023 			callout_setfunc(&sc->sc_txfifo_ch,
   2024 			    wm_82547_txfifo_stall, sc);
   2025 			aprint_verbose_dev(sc->sc_dev,
   2026 			    "using 82547 Tx FIFO stall work-around\n");
   2027 		}
   2028 	} else if (sc->sc_type >= WM_T_82571) {
   2029 		sc->sc_flags |= WM_F_PCIE;
   2030 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2031 		    && (sc->sc_type != WM_T_ICH10)
   2032 		    && (sc->sc_type != WM_T_PCH)
   2033 		    && (sc->sc_type != WM_T_PCH2)
   2034 		    && (sc->sc_type != WM_T_PCH_LPT)
   2035 		    && (sc->sc_type != WM_T_PCH_SPT)
   2036 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2037 			/* ICH* and PCH* have no PCIe capability registers */
   2038 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2039 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2040 				NULL) == 0)
   2041 				aprint_error_dev(sc->sc_dev,
   2042 				    "unable to find PCIe capability\n");
   2043 		}
   2044 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2045 	} else {
   2046 		reg = CSR_READ(sc, WMREG_STATUS);
   2047 		if (reg & STATUS_BUS64)
   2048 			sc->sc_flags |= WM_F_BUS64;
   2049 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2050 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2051 
   2052 			sc->sc_flags |= WM_F_PCIX;
   2053 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2054 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2055 				aprint_error_dev(sc->sc_dev,
   2056 				    "unable to find PCIX capability\n");
   2057 			else if (sc->sc_type != WM_T_82545_3 &&
   2058 				 sc->sc_type != WM_T_82546_3) {
   2059 				/*
   2060 				 * Work around a problem caused by the BIOS
   2061 				 * setting the max memory read byte count
   2062 				 * incorrectly.
   2063 				 */
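				/*
				 * Both fields encode the byte count as a
				 * power of two: 512 << n bytes, as the
				 * message below also shows.
				 */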
   2064 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2065 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2066 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2067 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2068 
   2069 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2070 				    PCIX_CMD_BYTECNT_SHIFT;
   2071 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2072 				    PCIX_STATUS_MAXB_SHIFT;
   2073 				if (bytecnt > maxb) {
   2074 					aprint_verbose_dev(sc->sc_dev,
   2075 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2076 					    512 << bytecnt, 512 << maxb);
   2077 					pcix_cmd = (pcix_cmd &
   2078 					    ~PCIX_CMD_BYTECNT_MASK) |
   2079 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2080 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2081 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2082 					    pcix_cmd);
   2083 				}
   2084 			}
   2085 		}
   2086 		/*
   2087 		 * The quad port adapter is special; it has a PCIX-PCIX
   2088 		 * bridge on the board, and can run the secondary bus at
   2089 		 * a higher speed.
   2090 		 */
   2091 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2092 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2093 								      : 66;
   2094 		} else if (sc->sc_flags & WM_F_PCIX) {
   2095 			switch (reg & STATUS_PCIXSPD_MASK) {
   2096 			case STATUS_PCIXSPD_50_66:
   2097 				sc->sc_bus_speed = 66;
   2098 				break;
   2099 			case STATUS_PCIXSPD_66_100:
   2100 				sc->sc_bus_speed = 100;
   2101 				break;
   2102 			case STATUS_PCIXSPD_100_133:
   2103 				sc->sc_bus_speed = 133;
   2104 				break;
   2105 			default:
   2106 				aprint_error_dev(sc->sc_dev,
   2107 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2108 				    reg & STATUS_PCIXSPD_MASK);
   2109 				sc->sc_bus_speed = 66;
   2110 				break;
   2111 			}
   2112 		} else
   2113 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2114 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2115 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2116 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2117 	}
   2118 
   2119 	/* clear interesting stat counters */
   2120 	CSR_READ(sc, WMREG_COLC);
   2121 	CSR_READ(sc, WMREG_RXERRC);
   2122 
   2123 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2124 	    || (sc->sc_type >= WM_T_ICH8))
   2125 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2126 	if (sc->sc_type >= WM_T_ICH8)
   2127 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2128 
   2129 	/* Set PHY, NVM mutex related stuff */
   2130 	switch (sc->sc_type) {
   2131 	case WM_T_82542_2_0:
   2132 	case WM_T_82542_2_1:
   2133 	case WM_T_82543:
   2134 	case WM_T_82544:
   2135 		/* Microwire */
   2136 		sc->nvm.read = wm_nvm_read_uwire;
   2137 		sc->sc_nvm_wordsize = 64;
   2138 		sc->sc_nvm_addrbits = 6;
   2139 		break;
   2140 	case WM_T_82540:
   2141 	case WM_T_82545:
   2142 	case WM_T_82545_3:
   2143 	case WM_T_82546:
   2144 	case WM_T_82546_3:
   2145 		/* Microwire */
   2146 		sc->nvm.read = wm_nvm_read_uwire;
   2147 		reg = CSR_READ(sc, WMREG_EECD);
   2148 		if (reg & EECD_EE_SIZE) {
   2149 			sc->sc_nvm_wordsize = 256;
   2150 			sc->sc_nvm_addrbits = 8;
   2151 		} else {
   2152 			sc->sc_nvm_wordsize = 64;
   2153 			sc->sc_nvm_addrbits = 6;
   2154 		}
   2155 		sc->sc_flags |= WM_F_LOCK_EECD;
   2156 		sc->nvm.acquire = wm_get_eecd;
   2157 		sc->nvm.release = wm_put_eecd;
   2158 		break;
   2159 	case WM_T_82541:
   2160 	case WM_T_82541_2:
   2161 	case WM_T_82547:
   2162 	case WM_T_82547_2:
   2163 		reg = CSR_READ(sc, WMREG_EECD);
   2164 		/*
    2165 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on
    2166 		 * 8254[17], so set the flags and functions before calling it.
   2167 		 */
   2168 		sc->sc_flags |= WM_F_LOCK_EECD;
   2169 		sc->nvm.acquire = wm_get_eecd;
   2170 		sc->nvm.release = wm_put_eecd;
   2171 		if (reg & EECD_EE_TYPE) {
   2172 			/* SPI */
   2173 			sc->nvm.read = wm_nvm_read_spi;
   2174 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2175 			wm_nvm_set_addrbits_size_eecd(sc);
   2176 		} else {
   2177 			/* Microwire */
   2178 			sc->nvm.read = wm_nvm_read_uwire;
   2179 			if ((reg & EECD_EE_ABITS) != 0) {
   2180 				sc->sc_nvm_wordsize = 256;
   2181 				sc->sc_nvm_addrbits = 8;
   2182 			} else {
   2183 				sc->sc_nvm_wordsize = 64;
   2184 				sc->sc_nvm_addrbits = 6;
   2185 			}
   2186 		}
   2187 		break;
   2188 	case WM_T_82571:
   2189 	case WM_T_82572:
   2190 		/* SPI */
   2191 		sc->nvm.read = wm_nvm_read_eerd;
    2192 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2193 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2194 		wm_nvm_set_addrbits_size_eecd(sc);
   2195 		sc->phy.acquire = wm_get_swsm_semaphore;
   2196 		sc->phy.release = wm_put_swsm_semaphore;
   2197 		sc->nvm.acquire = wm_get_nvm_82571;
   2198 		sc->nvm.release = wm_put_nvm_82571;
   2199 		break;
   2200 	case WM_T_82573:
   2201 	case WM_T_82574:
   2202 	case WM_T_82583:
   2203 		sc->nvm.read = wm_nvm_read_eerd;
    2204 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2205 		if (sc->sc_type == WM_T_82573) {
   2206 			sc->phy.acquire = wm_get_swsm_semaphore;
   2207 			sc->phy.release = wm_put_swsm_semaphore;
   2208 			sc->nvm.acquire = wm_get_nvm_82571;
   2209 			sc->nvm.release = wm_put_nvm_82571;
   2210 		} else {
   2211 			/* Both PHY and NVM use the same semaphore. */
   2212 			sc->phy.acquire = sc->nvm.acquire
   2213 			    = wm_get_swfwhw_semaphore;
   2214 			sc->phy.release = sc->nvm.release
   2215 			    = wm_put_swfwhw_semaphore;
   2216 		}
   2217 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2218 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2219 			sc->sc_nvm_wordsize = 2048;
   2220 		} else {
   2221 			/* SPI */
   2222 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2223 			wm_nvm_set_addrbits_size_eecd(sc);
   2224 		}
   2225 		break;
   2226 	case WM_T_82575:
   2227 	case WM_T_82576:
   2228 	case WM_T_82580:
   2229 	case WM_T_I350:
   2230 	case WM_T_I354:
   2231 	case WM_T_80003:
   2232 		/* SPI */
   2233 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2234 		wm_nvm_set_addrbits_size_eecd(sc);
   2235 		if ((sc->sc_type == WM_T_80003)
   2236 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2237 			sc->nvm.read = wm_nvm_read_eerd;
   2238 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2239 		} else {
   2240 			sc->nvm.read = wm_nvm_read_spi;
   2241 			sc->sc_flags |= WM_F_LOCK_EECD;
   2242 		}
   2243 		sc->phy.acquire = wm_get_phy_82575;
   2244 		sc->phy.release = wm_put_phy_82575;
   2245 		sc->nvm.acquire = wm_get_nvm_80003;
   2246 		sc->nvm.release = wm_put_nvm_80003;
   2247 		break;
   2248 	case WM_T_ICH8:
   2249 	case WM_T_ICH9:
   2250 	case WM_T_ICH10:
   2251 	case WM_T_PCH:
   2252 	case WM_T_PCH2:
   2253 	case WM_T_PCH_LPT:
   2254 		sc->nvm.read = wm_nvm_read_ich8;
   2255 		/* FLASH */
   2256 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2257 		sc->sc_nvm_wordsize = 2048;
   2258 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2259 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2260 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2261 			aprint_error_dev(sc->sc_dev,
   2262 			    "can't map FLASH registers\n");
   2263 			goto out;
   2264 		}
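		/*
		 * GFPREG describes the valid flash region: the low bits
		 * hold the base and bits 16.. the limit, in units of
		 * flash sectors.  That span covers both NVM banks, so
		 * halve it and convert bytes to 16-bit words to get the
		 * size of one bank.
		 */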
   2265 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2266 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2267 		    ICH_FLASH_SECTOR_SIZE;
   2268 		sc->sc_ich8_flash_bank_size =
   2269 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2270 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2271 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2272 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2273 		sc->sc_flashreg_offset = 0;
   2274 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2275 		sc->phy.release = wm_put_swflag_ich8lan;
   2276 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2277 		sc->nvm.release = wm_put_nvm_ich8lan;
   2278 		break;
   2279 	case WM_T_PCH_SPT:
   2280 	case WM_T_PCH_CNP:
   2281 		sc->nvm.read = wm_nvm_read_spt;
   2282 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2283 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2284 		sc->sc_flasht = sc->sc_st;
   2285 		sc->sc_flashh = sc->sc_sh;
   2286 		sc->sc_ich8_flash_base = 0;
   2287 		sc->sc_nvm_wordsize =
   2288 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2289 		    * NVM_SIZE_MULTIPLIER;
    2290 		/* That is the size in bytes; we want words */
    2291 		sc->sc_nvm_wordsize /= 2;
    2292 		/* Assume 2 banks */
   2293 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2294 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2295 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2296 		sc->phy.release = wm_put_swflag_ich8lan;
   2297 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2298 		sc->nvm.release = wm_put_nvm_ich8lan;
   2299 		break;
   2300 	case WM_T_I210:
   2301 	case WM_T_I211:
    2302 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2303 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2304 		if (wm_nvm_flash_presence_i210(sc)) {
   2305 			sc->nvm.read = wm_nvm_read_eerd;
   2306 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2307 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2308 			wm_nvm_set_addrbits_size_eecd(sc);
   2309 		} else {
   2310 			sc->nvm.read = wm_nvm_read_invm;
   2311 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2312 			sc->sc_nvm_wordsize = INVM_SIZE;
   2313 		}
   2314 		sc->phy.acquire = wm_get_phy_82575;
   2315 		sc->phy.release = wm_put_phy_82575;
   2316 		sc->nvm.acquire = wm_get_nvm_80003;
   2317 		sc->nvm.release = wm_put_nvm_80003;
   2318 		break;
   2319 	default:
   2320 		break;
   2321 	}
   2322 
   2323 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2324 	switch (sc->sc_type) {
   2325 	case WM_T_82571:
   2326 	case WM_T_82572:
   2327 		reg = CSR_READ(sc, WMREG_SWSM2);
   2328 		if ((reg & SWSM2_LOCK) == 0) {
   2329 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2330 			force_clear_smbi = true;
   2331 		} else
   2332 			force_clear_smbi = false;
   2333 		break;
   2334 	case WM_T_82573:
   2335 	case WM_T_82574:
   2336 	case WM_T_82583:
   2337 		force_clear_smbi = true;
   2338 		break;
   2339 	default:
   2340 		force_clear_smbi = false;
   2341 		break;
   2342 	}
   2343 	if (force_clear_smbi) {
   2344 		reg = CSR_READ(sc, WMREG_SWSM);
   2345 		if ((reg & SWSM_SMBI) != 0)
   2346 			aprint_error_dev(sc->sc_dev,
   2347 			    "Please update the Bootagent\n");
   2348 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2349 	}
   2350 
   2351 	/*
    2352 	 * Defer printing the EEPROM type until after verifying the checksum.
    2353 	 * This allows the EEPROM type to be printed correctly in the case
   2354 	 * that no EEPROM is attached.
   2355 	 */
   2356 	/*
   2357 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2358 	 * this for later, so we can fail future reads from the EEPROM.
   2359 	 */
   2360 	if (wm_nvm_validate_checksum(sc)) {
   2361 		/*
   2362 		 * Read twice again because some PCI-e parts fail the
    2363 		 * first check due to the link being in a sleep state.
   2364 		 */
   2365 		if (wm_nvm_validate_checksum(sc))
   2366 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2367 	}
   2368 
   2369 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2370 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2371 	else {
   2372 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2373 		    sc->sc_nvm_wordsize);
   2374 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2375 			aprint_verbose("iNVM");
   2376 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2377 			aprint_verbose("FLASH(HW)");
   2378 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2379 			aprint_verbose("FLASH");
   2380 		else {
   2381 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2382 				eetype = "SPI";
   2383 			else
   2384 				eetype = "MicroWire";
   2385 			aprint_verbose("(%d address bits) %s EEPROM",
   2386 			    sc->sc_nvm_addrbits, eetype);
   2387 		}
   2388 	}
   2389 	wm_nvm_version(sc);
   2390 	aprint_verbose("\n");
   2391 
   2392 	/*
    2393 	 * XXX This is the first call to wm_gmii_setup_phytype. The result
    2394 	 * might be incorrect.
   2395 	 */
   2396 	wm_gmii_setup_phytype(sc, 0, 0);
   2397 
   2398 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2399 	switch (sc->sc_type) {
   2400 	case WM_T_ICH8:
   2401 	case WM_T_ICH9:
   2402 	case WM_T_ICH10:
   2403 	case WM_T_PCH:
   2404 	case WM_T_PCH2:
   2405 	case WM_T_PCH_LPT:
   2406 	case WM_T_PCH_SPT:
   2407 	case WM_T_PCH_CNP:
   2408 		apme_mask = WUC_APME;
   2409 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2410 		if ((eeprom_data & apme_mask) != 0)
   2411 			sc->sc_flags |= WM_F_WOL;
   2412 		break;
   2413 	default:
   2414 		break;
   2415 	}
   2416 
   2417 	/* Reset the chip to a known state. */
   2418 	wm_reset(sc);
   2419 
   2420 	/*
   2421 	 * Check for I21[01] PLL workaround.
   2422 	 *
   2423 	 * Three cases:
   2424 	 * a) Chip is I211.
   2425 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2426 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2427 	 */
   2428 	if (sc->sc_type == WM_T_I211)
   2429 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2430 	if (sc->sc_type == WM_T_I210) {
   2431 		if (!wm_nvm_flash_presence_i210(sc))
   2432 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2433 		else if ((sc->sc_nvm_ver_major < 3)
   2434 		    || ((sc->sc_nvm_ver_major == 3)
   2435 			&& (sc->sc_nvm_ver_minor < 25))) {
   2436 			aprint_verbose_dev(sc->sc_dev,
   2437 			    "ROM image version %d.%d is older than 3.25\n",
   2438 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2439 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2440 		}
   2441 	}
   2442 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2443 		wm_pll_workaround_i210(sc);
   2444 
   2445 	wm_get_wakeup(sc);
   2446 
   2447 	/* Non-AMT based hardware can now take control from firmware */
   2448 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2449 		wm_get_hw_control(sc);
   2450 
   2451 	/*
    2452 	 * Read the Ethernet address from the EEPROM, unless it was already
    2453 	 * found in the device properties.
   2454 	 */
   2455 	ea = prop_dictionary_get(dict, "mac-address");
   2456 	if (ea != NULL) {
   2457 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2458 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2459 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2460 	} else {
   2461 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2462 			aprint_error_dev(sc->sc_dev,
   2463 			    "unable to read Ethernet address\n");
   2464 			goto out;
   2465 		}
   2466 	}
   2467 
   2468 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2469 	    ether_sprintf(enaddr));
   2470 
   2471 	/*
   2472 	 * Read the config info from the EEPROM, and set up various
   2473 	 * bits in the control registers based on their contents.
   2474 	 */
   2475 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2476 	if (pn != NULL) {
   2477 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2478 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2479 	} else {
   2480 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2481 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2482 			goto out;
   2483 		}
   2484 	}
   2485 
   2486 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2487 	if (pn != NULL) {
   2488 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2489 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2490 	} else {
   2491 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2492 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2493 			goto out;
   2494 		}
   2495 	}
   2496 
   2497 	/* check for WM_F_WOL */
   2498 	switch (sc->sc_type) {
   2499 	case WM_T_82542_2_0:
   2500 	case WM_T_82542_2_1:
   2501 	case WM_T_82543:
   2502 		/* dummy? */
   2503 		eeprom_data = 0;
   2504 		apme_mask = NVM_CFG3_APME;
   2505 		break;
   2506 	case WM_T_82544:
   2507 		apme_mask = NVM_CFG2_82544_APM_EN;
   2508 		eeprom_data = cfg2;
   2509 		break;
   2510 	case WM_T_82546:
   2511 	case WM_T_82546_3:
   2512 	case WM_T_82571:
   2513 	case WM_T_82572:
   2514 	case WM_T_82573:
   2515 	case WM_T_82574:
   2516 	case WM_T_82583:
   2517 	case WM_T_80003:
   2518 	case WM_T_82575:
   2519 	case WM_T_82576:
   2520 		apme_mask = NVM_CFG3_APME;
   2521 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2522 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2523 		break;
   2524 	case WM_T_82580:
   2525 	case WM_T_I350:
   2526 	case WM_T_I354:
   2527 	case WM_T_I210:
   2528 	case WM_T_I211:
   2529 		apme_mask = NVM_CFG3_APME;
   2530 		wm_nvm_read(sc,
   2531 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2532 		    1, &eeprom_data);
   2533 		break;
   2534 	case WM_T_ICH8:
   2535 	case WM_T_ICH9:
   2536 	case WM_T_ICH10:
   2537 	case WM_T_PCH:
   2538 	case WM_T_PCH2:
   2539 	case WM_T_PCH_LPT:
   2540 	case WM_T_PCH_SPT:
   2541 	case WM_T_PCH_CNP:
    2542 		/* Already checked before wm_reset() */
   2543 		apme_mask = eeprom_data = 0;
   2544 		break;
   2545 	default: /* XXX 82540 */
   2546 		apme_mask = NVM_CFG3_APME;
   2547 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2548 		break;
   2549 	}
   2550 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2551 	if ((eeprom_data & apme_mask) != 0)
   2552 		sc->sc_flags |= WM_F_WOL;
   2553 
   2554 	/*
    2555 	 * We have the EEPROM settings; now apply the special cases
    2556 	 * where the EEPROM may be wrong or the board won't support
    2557 	 * wake-on-LAN on a particular port.
   2558 	 */
   2559 	switch (sc->sc_pcidevid) {
   2560 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2561 		sc->sc_flags &= ~WM_F_WOL;
   2562 		break;
   2563 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2564 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
    2565 		/* Wake events are only supported on port A of dual-fiber
    2566 		 * adapters, regardless of the EEPROM setting */
   2567 		if (sc->sc_funcid == 1)
   2568 			sc->sc_flags &= ~WM_F_WOL;
   2569 		break;
   2570 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2571 		/* if quad port adapter, disable WoL on all but port A */
   2572 		if (sc->sc_funcid != 0)
   2573 			sc->sc_flags &= ~WM_F_WOL;
   2574 		break;
   2575 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
    2576 		/* Wake events are only supported on port A of dual-fiber
    2577 		 * adapters, regardless of the EEPROM setting */
   2578 		if (sc->sc_funcid == 1)
   2579 			sc->sc_flags &= ~WM_F_WOL;
   2580 		break;
   2581 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2582 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2583 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2584 		/* if quad port adapter, disable WoL on all but port A */
   2585 		if (sc->sc_funcid != 0)
   2586 			sc->sc_flags &= ~WM_F_WOL;
   2587 		break;
   2588 	}
   2589 
   2590 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2591 		/* Check NVM for autonegotiation */
   2592 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2593 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2594 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2595 		}
   2596 	}
   2597 
   2598 	/*
    2599 	 * XXX need special handling for some multiple-port cards
    2600 	 * to disable a particular port.
   2601 	 */
   2602 
   2603 	if (sc->sc_type >= WM_T_82544) {
   2604 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2605 		if (pn != NULL) {
   2606 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2607 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2608 		} else {
   2609 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2610 				aprint_error_dev(sc->sc_dev,
   2611 				    "unable to read SWDPIN\n");
   2612 				goto out;
   2613 			}
   2614 		}
   2615 	}
   2616 
   2617 	if (cfg1 & NVM_CFG1_ILOS)
   2618 		sc->sc_ctrl |= CTRL_ILOS;
   2619 
   2620 	/*
   2621 	 * XXX
    2622 	 * This code isn't correct because pins 2 and 3 are located in
    2623 	 * different positions on newer chips. Check all datasheets.
    2624 	 *
    2625 	 * Until this problem is resolved, only handle chips up to the 82580.
   2626 	 */
   2627 	if (sc->sc_type <= WM_T_82580) {
   2628 		if (sc->sc_type >= WM_T_82544) {
   2629 			sc->sc_ctrl |=
   2630 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2631 			    CTRL_SWDPIO_SHIFT;
   2632 			sc->sc_ctrl |=
   2633 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2634 			    CTRL_SWDPINS_SHIFT;
   2635 		} else {
   2636 			sc->sc_ctrl |=
   2637 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2638 			    CTRL_SWDPIO_SHIFT;
   2639 		}
   2640 	}
   2641 
   2642 	/* XXX For other than 82580? */
   2643 	if (sc->sc_type == WM_T_82580) {
   2644 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2645 		if (nvmword & __BIT(13))
   2646 			sc->sc_ctrl |= CTRL_ILOS;
   2647 	}
   2648 
   2649 #if 0
   2650 	if (sc->sc_type >= WM_T_82544) {
   2651 		if (cfg1 & NVM_CFG1_IPS0)
   2652 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2653 		if (cfg1 & NVM_CFG1_IPS1)
   2654 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2655 		sc->sc_ctrl_ext |=
   2656 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2657 		    CTRL_EXT_SWDPIO_SHIFT;
   2658 		sc->sc_ctrl_ext |=
   2659 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2660 		    CTRL_EXT_SWDPINS_SHIFT;
   2661 	} else {
   2662 		sc->sc_ctrl_ext |=
   2663 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2664 		    CTRL_EXT_SWDPIO_SHIFT;
   2665 	}
   2666 #endif
   2667 
   2668 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2669 #if 0
   2670 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2671 #endif
   2672 
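	/*
	 * K1 is a power-saving state of the MAC/PHY interconnect on PCH.
	 * Remember the NVM default here so the PHY code can apply it
	 * later.
	 */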
   2673 	if (sc->sc_type == WM_T_PCH) {
   2674 		uint16_t val;
   2675 
   2676 		/* Save the NVM K1 bit setting */
   2677 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2678 
   2679 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2680 			sc->sc_nvm_k1_enabled = 1;
   2681 		else
   2682 			sc->sc_nvm_k1_enabled = 0;
   2683 	}
   2684 
    2685 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2686 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2687 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2688 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2689 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2690 	    || sc->sc_type == WM_T_82573
   2691 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2692 		/* Copper only */
   2693 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2694 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2695 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2696 	    || (sc->sc_type == WM_T_I211)) {
   2697 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2698 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2699 		switch (link_mode) {
   2700 		case CTRL_EXT_LINK_MODE_1000KX:
   2701 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2702 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2703 			break;
   2704 		case CTRL_EXT_LINK_MODE_SGMII:
   2705 			if (wm_sgmii_uses_mdio(sc)) {
   2706 				aprint_verbose_dev(sc->sc_dev,
   2707 				    "SGMII(MDIO)\n");
   2708 				sc->sc_flags |= WM_F_SGMII;
   2709 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2710 				break;
   2711 			}
   2712 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2713 			/*FALLTHROUGH*/
   2714 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2715 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2716 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2717 				if (link_mode
   2718 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2719 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2720 					sc->sc_flags |= WM_F_SGMII;
   2721 				} else {
   2722 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2723 					aprint_verbose_dev(sc->sc_dev,
   2724 					    "SERDES\n");
   2725 				}
   2726 				break;
   2727 			}
   2728 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2729 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2730 
   2731 			/* Change current link mode setting */
   2732 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2733 			switch (sc->sc_mediatype) {
   2734 			case WM_MEDIATYPE_COPPER:
   2735 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2736 				break;
   2737 			case WM_MEDIATYPE_SERDES:
   2738 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2739 				break;
   2740 			default:
   2741 				break;
   2742 			}
   2743 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2744 			break;
   2745 		case CTRL_EXT_LINK_MODE_GMII:
   2746 		default:
   2747 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2748 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2749 			break;
   2750 		}
   2751 
    2752 		/* Enable the I2C interface only when SGMII uses it. */
    2753 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2754 			reg |= CTRL_EXT_I2C_ENA;
    2755 		else
    2756 			reg &= ~CTRL_EXT_I2C_ENA;
   2757 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2758 	} else if (sc->sc_type < WM_T_82543 ||
   2759 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2760 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2761 			aprint_error_dev(sc->sc_dev,
   2762 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2763 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2764 		}
   2765 	} else {
   2766 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2767 			aprint_error_dev(sc->sc_dev,
   2768 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2769 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2770 		}
   2771 	}
   2772 
   2773 	if (sc->sc_type >= WM_T_PCH2)
   2774 		sc->sc_flags |= WM_F_EEE;
   2775 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2776 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2777 		/* XXX: Need special handling for I354. (not yet) */
   2778 		if (sc->sc_type != WM_T_I354)
   2779 			sc->sc_flags |= WM_F_EEE;
   2780 	}
   2781 
   2782 	/* Set device properties (macflags) */
   2783 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2784 
   2785 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2786 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2787 
   2788 	/* Initialize the media structures accordingly. */
   2789 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2790 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2791 	else
   2792 		wm_tbi_mediainit(sc); /* All others */
   2793 
   2794 	ifp = &sc->sc_ethercom.ec_if;
   2795 	xname = device_xname(sc->sc_dev);
   2796 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2797 	ifp->if_softc = sc;
   2798 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2799 #ifdef WM_MPSAFE
   2800 	ifp->if_extflags = IFEF_MPSAFE;
   2801 #endif
   2802 	ifp->if_ioctl = wm_ioctl;
   2803 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2804 		ifp->if_start = wm_nq_start;
   2805 		/*
    2806 		 * When the number of CPUs is one and the controller can use
    2807 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2808 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2809 		 * other for link status changes.
    2810 		 * In this situation, wm_nq_transmit() is disadvantageous
    2811 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2812 		 */
   2813 		if (wm_is_using_multiqueue(sc))
   2814 			ifp->if_transmit = wm_nq_transmit;
   2815 	} else {
   2816 		ifp->if_start = wm_start;
   2817 		/*
    2818 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2819 		 */
   2820 		if (wm_is_using_multiqueue(sc))
   2821 			ifp->if_transmit = wm_transmit;
   2822 	}
    2823 	/* wm(4) does not use ifp->if_watchdog; wm_tick serves as the watchdog. */
   2824 	ifp->if_init = wm_init;
   2825 	ifp->if_stop = wm_stop;
   2826 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2827 	IFQ_SET_READY(&ifp->if_snd);
   2828 
   2829 	/* Check for jumbo frame */
   2830 	switch (sc->sc_type) {
   2831 	case WM_T_82573:
   2832 		/* XXX limited to 9234 if ASPM is disabled */
   2833 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2834 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2835 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2836 		break;
   2837 	case WM_T_82571:
   2838 	case WM_T_82572:
   2839 	case WM_T_82574:
   2840 	case WM_T_82583:
   2841 	case WM_T_82575:
   2842 	case WM_T_82576:
   2843 	case WM_T_82580:
   2844 	case WM_T_I350:
   2845 	case WM_T_I354:
   2846 	case WM_T_I210:
   2847 	case WM_T_I211:
   2848 	case WM_T_80003:
   2849 	case WM_T_ICH9:
   2850 	case WM_T_ICH10:
   2851 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2852 	case WM_T_PCH_LPT:
   2853 	case WM_T_PCH_SPT:
   2854 	case WM_T_PCH_CNP:
   2855 		/* XXX limited to 9234 */
   2856 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2857 		break;
   2858 	case WM_T_PCH:
   2859 		/* XXX limited to 4096 */
   2860 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2861 		break;
   2862 	case WM_T_82542_2_0:
   2863 	case WM_T_82542_2_1:
   2864 	case WM_T_ICH8:
   2865 		/* No support for jumbo frame */
   2866 		break;
   2867 	default:
   2868 		/* ETHER_MAX_LEN_JUMBO */
   2869 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2870 		break;
   2871 	}
   2872 
   2873 	/* If we're a i82543 or greater, we can support VLANs. */
   2874 	if (sc->sc_type >= WM_T_82543)
   2875 		sc->sc_ethercom.ec_capabilities |=
   2876 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2877 
   2878 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2879 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2880 
   2881 	/*
    2882 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2883 	 * on i82543 and later.
   2884 	 */
   2885 	if (sc->sc_type >= WM_T_82543) {
   2886 		ifp->if_capabilities |=
   2887 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2888 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2889 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2890 		    IFCAP_CSUM_TCPv6_Tx |
   2891 		    IFCAP_CSUM_UDPv6_Tx;
   2892 	}
   2893 
   2894 	/*
   2895 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2896 	 *
   2897 	 *	82541GI (8086:1076) ... no
   2898 	 *	82572EI (8086:10b9) ... yes
   2899 	 */
   2900 	if (sc->sc_type >= WM_T_82571) {
   2901 		ifp->if_capabilities |=
   2902 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2903 	}
   2904 
   2905 	/*
   2906 	 * If we're a i82544 or greater (except i82547), we can do
   2907 	 * TCP segmentation offload.
   2908 	 */
   2909 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2910 		ifp->if_capabilities |= IFCAP_TSOv4;
   2911 	}
   2912 
   2913 	if (sc->sc_type >= WM_T_82571) {
   2914 		ifp->if_capabilities |= IFCAP_TSOv6;
   2915 	}
   2916 
   2917 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2918 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2919 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2920 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2921 
   2922 #ifdef WM_MPSAFE
   2923 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2924 #else
   2925 	sc->sc_core_lock = NULL;
   2926 #endif
   2927 
   2928 	/* Attach the interface. */
   2929 	error = if_initialize(ifp);
   2930 	if (error != 0) {
   2931 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2932 		    error);
   2933 		return; /* Error */
   2934 	}
   2935 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2936 	ether_ifattach(ifp, enaddr);
   2937 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2938 	if_register(ifp);
   2939 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2940 	    RND_FLAG_DEFAULT);
   2941 
   2942 #ifdef WM_EVENT_COUNTERS
   2943 	/* Attach event counters. */
   2944 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2945 	    NULL, xname, "linkintr");
   2946 
   2947 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2948 	    NULL, xname, "tx_xoff");
   2949 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2950 	    NULL, xname, "tx_xon");
   2951 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2952 	    NULL, xname, "rx_xoff");
   2953 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2954 	    NULL, xname, "rx_xon");
   2955 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2956 	    NULL, xname, "rx_macctl");
   2957 #endif /* WM_EVENT_COUNTERS */
   2958 
   2959 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2960 		pmf_class_network_register(self, ifp);
   2961 	else
   2962 		aprint_error_dev(self, "couldn't establish power handler\n");
   2963 
   2964 	sc->sc_flags |= WM_F_ATTACHED;
   2965 out:
   2966 	return;
   2967 }
   2968 
   2969 /* The detach function (ca_detach) */
   2970 static int
   2971 wm_detach(device_t self, int flags __unused)
   2972 {
   2973 	struct wm_softc *sc = device_private(self);
   2974 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2975 	int i;
   2976 
   2977 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2978 		return 0;
   2979 
   2980 	/* Stop the interface. Callouts are stopped in it. */
   2981 	wm_stop(ifp, 1);
   2982 
   2983 	pmf_device_deregister(self);
   2984 
   2985 #ifdef WM_EVENT_COUNTERS
   2986 	evcnt_detach(&sc->sc_ev_linkintr);
   2987 
   2988 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2989 	evcnt_detach(&sc->sc_ev_tx_xon);
   2990 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2991 	evcnt_detach(&sc->sc_ev_rx_xon);
   2992 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2993 #endif /* WM_EVENT_COUNTERS */
   2994 
   2995 	/* Tell the firmware about the release */
   2996 	WM_CORE_LOCK(sc);
   2997 	wm_release_manageability(sc);
   2998 	wm_release_hw_control(sc);
   2999 	wm_enable_wakeup(sc);
   3000 	WM_CORE_UNLOCK(sc);
   3001 
   3002 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3003 
   3004 	/* Delete all remaining media. */
   3005 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3006 
   3007 	ether_ifdetach(ifp);
   3008 	if_detach(ifp);
   3009 	if_percpuq_destroy(sc->sc_ipq);
   3010 
   3011 	/* Unload RX dmamaps and free mbufs */
   3012 	for (i = 0; i < sc->sc_nqueues; i++) {
   3013 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3014 		mutex_enter(rxq->rxq_lock);
   3015 		wm_rxdrain(rxq);
   3016 		mutex_exit(rxq->rxq_lock);
   3017 	}
   3018 	/* Must unlock here */
   3019 
   3020 	/* Disestablish the interrupt handler */
   3021 	for (i = 0; i < sc->sc_nintrs; i++) {
   3022 		if (sc->sc_ihs[i] != NULL) {
   3023 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3024 			sc->sc_ihs[i] = NULL;
   3025 		}
   3026 	}
   3027 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3028 
   3029 	wm_free_txrx_queues(sc);
   3030 
   3031 	/* Unmap the registers */
   3032 	if (sc->sc_ss) {
   3033 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3034 		sc->sc_ss = 0;
   3035 	}
   3036 	if (sc->sc_ios) {
   3037 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3038 		sc->sc_ios = 0;
   3039 	}
   3040 	if (sc->sc_flashs) {
   3041 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3042 		sc->sc_flashs = 0;
   3043 	}
   3044 
   3045 	if (sc->sc_core_lock)
   3046 		mutex_obj_free(sc->sc_core_lock);
   3047 	if (sc->sc_ich_phymtx)
   3048 		mutex_obj_free(sc->sc_ich_phymtx);
   3049 	if (sc->sc_ich_nvmmtx)
   3050 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3051 
   3052 	return 0;
   3053 }
   3054 
   3055 static bool
   3056 wm_suspend(device_t self, const pmf_qual_t *qual)
   3057 {
   3058 	struct wm_softc *sc = device_private(self);
   3059 
   3060 	wm_release_manageability(sc);
   3061 	wm_release_hw_control(sc);
   3062 	wm_enable_wakeup(sc);
   3063 
   3064 	return true;
   3065 }
   3066 
   3067 static bool
   3068 wm_resume(device_t self, const pmf_qual_t *qual)
   3069 {
   3070 	struct wm_softc *sc = device_private(self);
   3071 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3072 	pcireg_t reg;
   3073 	char buf[256];
   3074 
   3075 	reg = CSR_READ(sc, WMREG_WUS);
   3076 	if (reg != 0) {
   3077 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3078 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
    3079 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C: write 1 to clear */
   3080 	}
   3081 
   3082 	if (sc->sc_type >= WM_T_PCH2)
   3083 		wm_resume_workarounds_pchlan(sc);
   3084 	if ((ifp->if_flags & IFF_UP) == 0) {
   3085 		wm_reset(sc);
   3086 		/* Non-AMT based hardware can now take control from firmware */
   3087 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3088 			wm_get_hw_control(sc);
   3089 		wm_init_manageability(sc);
   3090 	} else {
   3091 		/*
   3092 		 * We called pmf_class_network_register(), so if_init() is
   3093 		 * automatically called when IFF_UP. wm_reset(),
   3094 		 * wm_get_hw_control() and wm_init_manageability() are called
   3095 		 * via wm_init().
   3096 		 */
   3097 	}
   3098 
   3099 	return true;
   3100 }
   3101 
   3102 /*
   3103  * wm_watchdog:		[ifnet interface function]
   3104  *
   3105  *	Watchdog timer handler.
   3106  */
   3107 static void
   3108 wm_watchdog(struct ifnet *ifp)
   3109 {
   3110 	int qid;
   3111 	struct wm_softc *sc = ifp->if_softc;
    3112 	uint16_t hang_queue = 0; /* wm(4)'s max queue count is 16 (82576). */
   3113 
   3114 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3115 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3116 
   3117 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3118 	}
   3119 
   3120 	/*
    3121 	 * If any of the queues hung, reset the interface.
   3122 	 */
   3123 	if (hang_queue != 0) {
   3124 		(void) wm_init(ifp);
   3125 
   3126 		/*
    3127 		 * Some upper layer processing still calls ifp->if_start()
    3128 		 * directly, e.g. ALTQ or a single-CPU system.
   3129 		 */
   3130 		/* Try to get more packets going. */
   3131 		ifp->if_start(ifp);
   3132 	}
   3133 }
   3134 
   3135 
   3136 static void
   3137 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3138 {
   3139 
   3140 	mutex_enter(txq->txq_lock);
   3141 	if (txq->txq_sending &&
   3142 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3143 		wm_watchdog_txq_locked(ifp, txq, hang);
   3144 	}
   3145 	mutex_exit(txq->txq_lock);
   3146 }
   3147 
   3148 static void
   3149 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3150     uint16_t *hang)
   3151 {
   3152 	struct wm_softc *sc = ifp->if_softc;
   3153 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3154 
   3155 	KASSERT(mutex_owned(txq->txq_lock));
   3156 
   3157 	/*
   3158 	 * Since we're using delayed interrupts, sweep up
   3159 	 * before we report an error.
   3160 	 */
   3161 	wm_txeof(txq, UINT_MAX);
   3162 
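         	/* Still marked as sending after the sweep: the queue is hung. */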
   3163 	if (txq->txq_sending)
   3164 		*hang |= __BIT(wmq->wmq_id);
   3165 
   3166 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3167 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3168 		    device_xname(sc->sc_dev));
   3169 	} else {
   3170 #ifdef WM_DEBUG
   3171 		int i, j;
   3172 		struct wm_txsoft *txs;
   3173 #endif
   3174 		log(LOG_ERR,
   3175 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3176 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3177 		    txq->txq_next);
   3178 		ifp->if_oerrors++;
   3179 #ifdef WM_DEBUG
    3180 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    3181 		    i = WM_NEXTTXS(txq, i)) {
    3182 			txs = &txq->txq_soft[i];
    3183 			printf("txs %d tx %d -> %d\n",
    3184 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    3185 			for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
    3186 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    3187 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    3188 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    3189 					printf("\t %#08x%08x\n",
    3190 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    3191 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    3192 				} else {
    3193 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    3194 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
    3195 					    txq->txq_descs[j].wtx_addr.wa_low);
    3196 					printf("\t %#04x%02x%02x%08x\n",
    3197 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
    3198 					    txq->txq_descs[j].wtx_fields.wtxu_options,
    3199 					    txq->txq_descs[j].wtx_fields.wtxu_status,
    3200 					    txq->txq_descs[j].wtx_cmdlen);
    3201 				}
    3202 				if (j == txs->txs_lastdesc)
    3203 					break;
    3204 			}
    3205 		}
   3206 #endif
   3207 	}
   3208 }
   3209 
   3210 /*
   3211  * wm_tick:
   3212  *
   3213  *	One second timer, used to check link status, sweep up
   3214  *	completed transmit jobs, etc.
   3215  */
   3216 static void
   3217 wm_tick(void *arg)
   3218 {
   3219 	struct wm_softc *sc = arg;
   3220 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3221 #ifndef WM_MPSAFE
   3222 	int s = splnet();
   3223 #endif
   3224 
   3225 	WM_CORE_LOCK(sc);
   3226 
   3227 	if (sc->sc_core_stopping) {
   3228 		WM_CORE_UNLOCK(sc);
   3229 #ifndef WM_MPSAFE
   3230 		splx(s);
   3231 #endif
   3232 		return;
   3233 	}
   3234 
   3235 	if (sc->sc_type >= WM_T_82542_2_1) {
   3236 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3237 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3238 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3239 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3240 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3241 	}
   3242 
   3243 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3244 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3245 	    + CSR_READ(sc, WMREG_CRCERRS)
   3246 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3247 	    + CSR_READ(sc, WMREG_SYMERRC)
   3248 	    + CSR_READ(sc, WMREG_RXERRC)
   3249 	    + CSR_READ(sc, WMREG_SEC)
   3250 	    + CSR_READ(sc, WMREG_CEXTERR)
   3251 	    + CSR_READ(sc, WMREG_RLEC);
   3252 	/*
    3253 	 * WMREG_RNBC is incremented when no receive buffers are available
    3254 	 * in host memory. It is not the number of dropped packets, because
    3255 	 * the ethernet controller can still receive packets in that case
    3256 	 * as long as there is space in the PHY's FIFO.
    3257 	 *
    3258 	 * If you want to count WMREG_RNBC, use a dedicated EVCNT rather
    3259 	 * than if_iqdrops.
   3260 	 */
   3261 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3262 
   3263 	if (sc->sc_flags & WM_F_HAS_MII)
   3264 		mii_tick(&sc->sc_mii);
   3265 	else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)
   3266 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3267 		wm_serdes_tick(sc);
   3268 	else
   3269 		wm_tbi_tick(sc);
   3270 
   3271 	WM_CORE_UNLOCK(sc);
   3272 
   3273 	wm_watchdog(ifp);
   3274 
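         	/* Arrange the next tick one second (hz ticks) from now. */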
   3275 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3276 }
   3277 
   3278 static int
   3279 wm_ifflags_cb(struct ethercom *ec)
   3280 {
   3281 	struct ifnet *ifp = &ec->ec_if;
   3282 	struct wm_softc *sc = ifp->if_softc;
   3283 	int iffchange, ecchange;
   3284 	bool needreset = false;
   3285 	int rc = 0;
   3286 
   3287 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3288 		device_xname(sc->sc_dev), __func__));
   3289 
   3290 	WM_CORE_LOCK(sc);
   3291 
   3292 	/*
   3293 	 * Check for if_flags.
   3294 	 * Main usage is to prevent linkdown when opening bpf.
   3295 	 */
   3296 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3297 	sc->sc_if_flags = ifp->if_flags;
   3298 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3299 		needreset = true;
   3300 		goto ec;
   3301 	}
   3302 
   3303 	/* iff related updates */
   3304 	if ((iffchange & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3305 		wm_set_filter(sc);
   3306 
   3307 	wm_set_vlan(sc);
   3308 
   3309 ec:
   3310 	/* Check for ec_capenable. */
   3311 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3312 	sc->sc_ec_capenable = ec->ec_capenable;
   3313 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3314 		needreset = true;
   3315 		goto out;
   3316 	}
   3317 
   3318 	/* ec related updates */
   3319 	wm_set_eee(sc);
   3320 
   3321 out:
   3322 	if (needreset)
   3323 		rc = ENETRESET;
   3324 	WM_CORE_UNLOCK(sc);
   3325 
   3326 	return rc;
   3327 }
   3328 
   3329 /*
   3330  * wm_ioctl:		[ifnet interface function]
   3331  *
   3332  *	Handle control requests from the operator.
   3333  */
   3334 static int
   3335 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3336 {
   3337 	struct wm_softc *sc = ifp->if_softc;
   3338 	struct ifreq *ifr = (struct ifreq *) data;
   3339 	struct ifaddr *ifa = (struct ifaddr *)data;
   3340 	struct sockaddr_dl *sdl;
   3341 	int s, error;
   3342 
   3343 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3344 		device_xname(sc->sc_dev), __func__));
   3345 
   3346 #ifndef WM_MPSAFE
   3347 	s = splnet();
   3348 #endif
   3349 	switch (cmd) {
   3350 	case SIOCSIFMEDIA:
   3351 	case SIOCGIFMEDIA:
   3352 		WM_CORE_LOCK(sc);
   3353 		/* Flow control requires full-duplex mode. */
   3354 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3355 		    (ifr->ifr_media & IFM_FDX) == 0)
   3356 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3357 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3358 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3359 				/* We can do both TXPAUSE and RXPAUSE. */
   3360 				ifr->ifr_media |=
   3361 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3362 			}
   3363 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3364 		}
   3365 		WM_CORE_UNLOCK(sc);
   3366 #ifdef WM_MPSAFE
   3367 		s = splnet();
   3368 #endif
   3369 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3370 #ifdef WM_MPSAFE
   3371 		splx(s);
   3372 #endif
   3373 		break;
   3374 	case SIOCINITIFADDR:
   3375 		WM_CORE_LOCK(sc);
   3376 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3377 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3378 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3379 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3380 			/* unicast address is first multicast entry */
   3381 			wm_set_filter(sc);
   3382 			error = 0;
   3383 			WM_CORE_UNLOCK(sc);
   3384 			break;
   3385 		}
   3386 		WM_CORE_UNLOCK(sc);
   3387 		/*FALLTHROUGH*/
   3388 	default:
   3389 #ifdef WM_MPSAFE
   3390 		s = splnet();
   3391 #endif
   3392 		/* It may call wm_start, so unlock here */
   3393 		error = ether_ioctl(ifp, cmd, data);
   3394 #ifdef WM_MPSAFE
   3395 		splx(s);
   3396 #endif
   3397 		if (error != ENETRESET)
   3398 			break;
   3399 
   3400 		error = 0;
   3401 
   3402 		if (cmd == SIOCSIFCAP)
   3403 			error = (*ifp->if_init)(ifp);
   3404 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3405 			;
   3406 		else if (ifp->if_flags & IFF_RUNNING) {
   3407 			/*
   3408 			 * Multicast list has changed; set the hardware filter
   3409 			 * accordingly.
   3410 			 */
   3411 			WM_CORE_LOCK(sc);
   3412 			wm_set_filter(sc);
   3413 			WM_CORE_UNLOCK(sc);
   3414 		}
   3415 		break;
   3416 	}
   3417 
   3418 #ifndef WM_MPSAFE
   3419 	splx(s);
   3420 #endif
   3421 	return error;
   3422 }
   3423 
   3424 /* MAC address related */
   3425 
   3426 /*
    3427  * Get the offset of the MAC address and return it.
    3428  * If an error occurs, use offset 0.
   3429  */
   3430 static uint16_t
   3431 wm_check_alt_mac_addr(struct wm_softc *sc)
   3432 {
   3433 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3434 	uint16_t offset = NVM_OFF_MACADDR;
   3435 
   3436 	/* Try to read alternative MAC address pointer */
   3437 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3438 		return 0;
   3439 
   3440 	/* Check pointer if it's valid or not. */
   3441 	if ((offset == 0x0000) || (offset == 0xffff))
   3442 		return 0;
   3443 
   3444 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3445 	/*
    3446 	 * Check whether the alternative MAC address is valid.
    3447 	 * Some cards have a non-0xffff pointer but don't actually
    3448 	 * use an alternative MAC address.
    3449 	 *
    3450 	 * Check that the broadcast (I/G) bit, bit 0 of the first octet, is clear.
   3451 	 */
   3452 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3453 		if (((myea[0] & 0xff) & 0x01) == 0)
   3454 			return offset; /* Found */
   3455 
   3456 	/* Not found */
   3457 	return 0;
   3458 }
   3459 
   3460 static int
   3461 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3462 {
   3463 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3464 	uint16_t offset = NVM_OFF_MACADDR;
   3465 	int do_invert = 0;
   3466 
   3467 	switch (sc->sc_type) {
   3468 	case WM_T_82580:
   3469 	case WM_T_I350:
   3470 	case WM_T_I354:
   3471 		/* EEPROM Top Level Partitioning */
   3472 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3473 		break;
   3474 	case WM_T_82571:
   3475 	case WM_T_82575:
   3476 	case WM_T_82576:
   3477 	case WM_T_80003:
   3478 	case WM_T_I210:
   3479 	case WM_T_I211:
   3480 		offset = wm_check_alt_mac_addr(sc);
   3481 		if (offset == 0)
   3482 			if ((sc->sc_funcid & 0x01) == 1)
   3483 				do_invert = 1;
   3484 		break;
   3485 	default:
   3486 		if ((sc->sc_funcid & 0x01) == 1)
   3487 			do_invert = 1;
   3488 		break;
   3489 	}
   3490 
   3491 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3492 		goto bad;
   3493 
   3494 	enaddr[0] = myea[0] & 0xff;
   3495 	enaddr[1] = myea[0] >> 8;
   3496 	enaddr[2] = myea[1] & 0xff;
   3497 	enaddr[3] = myea[1] >> 8;
   3498 	enaddr[4] = myea[2] & 0xff;
   3499 	enaddr[5] = myea[2] >> 8;
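         	/*
         	 * NVM words are little-endian: e.g. myea[0] == 0x3412 yields
         	 * enaddr[0] == 0x12 and enaddr[1] == 0x34.
         	 */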
   3500 
   3501 	/*
   3502 	 * Toggle the LSB of the MAC address on the second port
   3503 	 * of some dual port cards.
   3504 	 */
   3505 	if (do_invert != 0)
   3506 		enaddr[5] ^= 1;
   3507 
   3508 	return 0;
   3509 
   3510  bad:
   3511 	return -1;
   3512 }
   3513 
   3514 /*
   3515  * wm_set_ral:
   3516  *
    3517  *	Set an entry in the receive address list.
   3518  */
   3519 static void
   3520 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3521 {
   3522 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3523 	uint32_t wlock_mac;
   3524 	int rv;
   3525 
   3526 	if (enaddr != NULL) {
   3527 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3528 		    (enaddr[3] << 24);
   3529 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3530 		ral_hi |= RAL_AV;
   3531 	} else {
   3532 		ral_lo = 0;
   3533 		ral_hi = 0;
   3534 	}
   3535 
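         	/*
         	 * ral_lo carries address octets 0-3 (octet 0 in bits 7:0);
         	 * ral_hi carries octets 4-5 plus the Address Valid bit.
         	 */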
   3536 	switch (sc->sc_type) {
   3537 	case WM_T_82542_2_0:
   3538 	case WM_T_82542_2_1:
   3539 	case WM_T_82543:
   3540 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3541 		CSR_WRITE_FLUSH(sc);
   3542 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3543 		CSR_WRITE_FLUSH(sc);
   3544 		break;
   3545 	case WM_T_PCH2:
   3546 	case WM_T_PCH_LPT:
   3547 	case WM_T_PCH_SPT:
   3548 	case WM_T_PCH_CNP:
   3549 		if (idx == 0) {
   3550 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3551 			CSR_WRITE_FLUSH(sc);
   3552 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3553 			CSR_WRITE_FLUSH(sc);
   3554 			return;
   3555 		}
   3556 		if (sc->sc_type != WM_T_PCH2) {
   3557 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3558 			    FWSM_WLOCK_MAC);
   3559 			addrl = WMREG_SHRAL(idx - 1);
   3560 			addrh = WMREG_SHRAH(idx - 1);
   3561 		} else {
   3562 			wlock_mac = 0;
   3563 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3564 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3565 		}
   3566 
   3567 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3568 			rv = wm_get_swflag_ich8lan(sc);
   3569 			if (rv != 0)
   3570 				return;
   3571 			CSR_WRITE(sc, addrl, ral_lo);
   3572 			CSR_WRITE_FLUSH(sc);
   3573 			CSR_WRITE(sc, addrh, ral_hi);
   3574 			CSR_WRITE_FLUSH(sc);
   3575 			wm_put_swflag_ich8lan(sc);
   3576 		}
   3577 
   3578 		break;
   3579 	default:
   3580 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3581 		CSR_WRITE_FLUSH(sc);
   3582 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3583 		CSR_WRITE_FLUSH(sc);
   3584 		break;
   3585 	}
   3586 }
   3587 
   3588 /*
   3589  * wm_mchash:
   3590  *
   3591  *	Compute the hash of the multicast address for the 4096-bit
   3592  *	multicast filter.
   3593  */
   3594 static uint32_t
   3595 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3596 {
   3597 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3598 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3599 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3600 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3601 	uint32_t hash;
   3602 
   3603 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3604 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3605 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3606 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3607 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3608 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3609 		return (hash & 0x3ff);
   3610 	}
   3611 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3612 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3613 
   3614 	return (hash & 0xfff);
   3615 }
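
         /*
          * Example (mchash_type 0): for the multicast address
          * 01:00:5e:00:00:01, hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
          * wm_set_filter() then uses hash >> 5 as the MTA register index (0)
          * and hash & 0x1f as the bit within that register (16).
          */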
   3616 
    3617 /*
    3618  * wm_rar_count:
    3619  *	Return the number of receive address list (RAL) entries.
    3620  */
   3621 static int
   3622 wm_rar_count(struct wm_softc *sc)
   3623 {
   3624 	int size;
   3625 
   3626 	switch (sc->sc_type) {
   3627 	case WM_T_ICH8:
    3628 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3629 		break;
   3630 	case WM_T_ICH9:
   3631 	case WM_T_ICH10:
   3632 	case WM_T_PCH:
   3633 		size = WM_RAL_TABSIZE_ICH8;
   3634 		break;
   3635 	case WM_T_PCH2:
   3636 		size = WM_RAL_TABSIZE_PCH2;
   3637 		break;
   3638 	case WM_T_PCH_LPT:
   3639 	case WM_T_PCH_SPT:
   3640 	case WM_T_PCH_CNP:
   3641 		size = WM_RAL_TABSIZE_PCH_LPT;
   3642 		break;
   3643 	case WM_T_82575:
   3644 		size = WM_RAL_TABSIZE_82575;
   3645 		break;
   3646 	case WM_T_82576:
   3647 	case WM_T_82580:
   3648 		size = WM_RAL_TABSIZE_82576;
   3649 		break;
   3650 	case WM_T_I350:
   3651 	case WM_T_I354:
   3652 		size = WM_RAL_TABSIZE_I350;
   3653 		break;
   3654 	default:
   3655 		size = WM_RAL_TABSIZE;
   3656 	}
   3657 
   3658 	return size;
   3659 }
   3660 
   3661 /*
   3662  * wm_set_filter:
   3663  *
   3664  *	Set up the receive filter.
   3665  */
   3666 static void
   3667 wm_set_filter(struct wm_softc *sc)
   3668 {
   3669 	struct ethercom *ec = &sc->sc_ethercom;
   3670 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3671 	struct ether_multi *enm;
   3672 	struct ether_multistep step;
   3673 	bus_addr_t mta_reg;
   3674 	uint32_t hash, reg, bit;
   3675 	int i, size, ralmax;
   3676 
   3677 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3678 		device_xname(sc->sc_dev), __func__));
   3679 
   3680 	if (sc->sc_type >= WM_T_82544)
   3681 		mta_reg = WMREG_CORDOVA_MTA;
   3682 	else
   3683 		mta_reg = WMREG_MTA;
   3684 
   3685 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3686 
   3687 	if (ifp->if_flags & IFF_BROADCAST)
   3688 		sc->sc_rctl |= RCTL_BAM;
   3689 	if (ifp->if_flags & IFF_PROMISC) {
   3690 		sc->sc_rctl |= RCTL_UPE;
   3691 		goto allmulti;
   3692 	}
   3693 
   3694 	/*
   3695 	 * Set the station address in the first RAL slot, and
   3696 	 * clear the remaining slots.
   3697 	 */
   3698 	size = wm_rar_count(sc);
   3699 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3700 
   3701 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3702 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3703 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3704 		switch (i) {
   3705 		case 0:
   3706 			/* We can use all entries */
   3707 			ralmax = size;
   3708 			break;
   3709 		case 1:
   3710 			/* Only RAR[0] */
   3711 			ralmax = 1;
   3712 			break;
   3713 		default:
   3714 			/* available SHRA + RAR[0] */
   3715 			ralmax = i + 1;
   3716 		}
   3717 	} else
   3718 		ralmax = size;
   3719 	for (i = 1; i < size; i++) {
   3720 		if (i < ralmax)
   3721 			wm_set_ral(sc, NULL, i);
   3722 	}
   3723 
   3724 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3725 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3726 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3727 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3728 		size = WM_ICH8_MC_TABSIZE;
   3729 	else
   3730 		size = WM_MC_TABSIZE;
   3731 	/* Clear out the multicast table. */
   3732 	for (i = 0; i < size; i++) {
   3733 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3734 		CSR_WRITE_FLUSH(sc);
   3735 	}
   3736 
   3737 	ETHER_LOCK(ec);
   3738 	ETHER_FIRST_MULTI(step, ec, enm);
   3739 	while (enm != NULL) {
   3740 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3741 			ETHER_UNLOCK(ec);
   3742 			/*
   3743 			 * We must listen to a range of multicast addresses.
   3744 			 * For now, just accept all multicasts, rather than
   3745 			 * trying to set only those filter bits needed to match
   3746 			 * the range.  (At this time, the only use of address
   3747 			 * ranges is for IP multicast routing, for which the
   3748 			 * range is big enough to require all bits set.)
   3749 			 */
   3750 			goto allmulti;
   3751 		}
   3752 
   3753 		hash = wm_mchash(sc, enm->enm_addrlo);
   3754 
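         		/* High hash bits select the MTA register; low 5 bits the bit. */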
   3755 		reg = (hash >> 5);
   3756 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3757 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3758 		    || (sc->sc_type == WM_T_PCH2)
   3759 		    || (sc->sc_type == WM_T_PCH_LPT)
   3760 		    || (sc->sc_type == WM_T_PCH_SPT)
   3761 		    || (sc->sc_type == WM_T_PCH_CNP))
   3762 			reg &= 0x1f;
   3763 		else
   3764 			reg &= 0x7f;
   3765 		bit = hash & 0x1f;
   3766 
   3767 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3768 		hash |= 1U << bit;
   3769 
   3770 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3771 			/*
   3772 			 * 82544 Errata 9: Certain register cannot be written
   3773 			 * with particular alignments in PCI-X bus operation
   3774 			 * (FCAH, MTA and VFTA).
   3775 			 */
   3776 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3777 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3778 			CSR_WRITE_FLUSH(sc);
   3779 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3780 			CSR_WRITE_FLUSH(sc);
   3781 		} else {
   3782 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3783 			CSR_WRITE_FLUSH(sc);
   3784 		}
   3785 
   3786 		ETHER_NEXT_MULTI(step, enm);
   3787 	}
   3788 	ETHER_UNLOCK(ec);
   3789 
   3790 	ifp->if_flags &= ~IFF_ALLMULTI;
   3791 	goto setit;
   3792 
   3793  allmulti:
   3794 	ifp->if_flags |= IFF_ALLMULTI;
   3795 	sc->sc_rctl |= RCTL_MPE;
   3796 
   3797  setit:
   3798 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3799 }
   3800 
   3801 /* Reset and init related */
   3802 
   3803 static void
   3804 wm_set_vlan(struct wm_softc *sc)
   3805 {
   3806 
   3807 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3808 		device_xname(sc->sc_dev), __func__));
   3809 
   3810 	/* Deal with VLAN enables. */
   3811 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3812 		sc->sc_ctrl |= CTRL_VME;
   3813 	else
   3814 		sc->sc_ctrl &= ~CTRL_VME;
   3815 
   3816 	/* Write the control registers. */
   3817 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3818 }
   3819 
   3820 static void
   3821 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3822 {
   3823 	uint32_t gcr;
   3824 	pcireg_t ctrl2;
   3825 
   3826 	gcr = CSR_READ(sc, WMREG_GCR);
   3827 
   3828 	/* Only take action if timeout value is defaulted to 0 */
   3829 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3830 		goto out;
   3831 
   3832 	if ((gcr & GCR_CAP_VER2) == 0) {
   3833 		gcr |= GCR_CMPL_TMOUT_10MS;
   3834 		goto out;
   3835 	}
   3836 
   3837 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3838 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3839 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3840 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3841 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3842 
   3843 out:
   3844 	/* Disable completion timeout resend */
   3845 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3846 
   3847 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3848 }
   3849 
   3850 void
   3851 wm_get_auto_rd_done(struct wm_softc *sc)
   3852 {
   3853 	int i;
   3854 
   3855 	/* wait for eeprom to reload */
   3856 	switch (sc->sc_type) {
   3857 	case WM_T_82571:
   3858 	case WM_T_82572:
   3859 	case WM_T_82573:
   3860 	case WM_T_82574:
   3861 	case WM_T_82583:
   3862 	case WM_T_82575:
   3863 	case WM_T_82576:
   3864 	case WM_T_82580:
   3865 	case WM_T_I350:
   3866 	case WM_T_I354:
   3867 	case WM_T_I210:
   3868 	case WM_T_I211:
   3869 	case WM_T_80003:
   3870 	case WM_T_ICH8:
   3871 	case WM_T_ICH9:
   3872 		for (i = 0; i < 10; i++) {
   3873 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3874 				break;
   3875 			delay(1000);
   3876 		}
   3877 		if (i == 10) {
   3878 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3879 			    "complete\n", device_xname(sc->sc_dev));
   3880 		}
   3881 		break;
   3882 	default:
   3883 		break;
   3884 	}
   3885 }
   3886 
   3887 void
   3888 wm_lan_init_done(struct wm_softc *sc)
   3889 {
   3890 	uint32_t reg = 0;
   3891 	int i;
   3892 
   3893 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3894 		device_xname(sc->sc_dev), __func__));
   3895 
   3896 	/* Wait for eeprom to reload */
   3897 	switch (sc->sc_type) {
   3898 	case WM_T_ICH10:
   3899 	case WM_T_PCH:
   3900 	case WM_T_PCH2:
   3901 	case WM_T_PCH_LPT:
   3902 	case WM_T_PCH_SPT:
   3903 	case WM_T_PCH_CNP:
   3904 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3905 			reg = CSR_READ(sc, WMREG_STATUS);
   3906 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3907 				break;
   3908 			delay(100);
   3909 		}
   3910 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3911 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3912 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3913 		}
   3914 		break;
   3915 	default:
   3916 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3917 		    __func__);
   3918 		break;
   3919 	}
   3920 
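         	/* Clear the latched bit so a later wait starts from a clean state. */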
   3921 	reg &= ~STATUS_LAN_INIT_DONE;
   3922 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3923 }
   3924 
   3925 void
   3926 wm_get_cfg_done(struct wm_softc *sc)
   3927 {
   3928 	int mask;
   3929 	uint32_t reg;
   3930 	int i;
   3931 
   3932 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3933 		device_xname(sc->sc_dev), __func__));
   3934 
   3935 	/* Wait for eeprom to reload */
   3936 	switch (sc->sc_type) {
   3937 	case WM_T_82542_2_0:
   3938 	case WM_T_82542_2_1:
   3939 		/* null */
   3940 		break;
   3941 	case WM_T_82543:
   3942 	case WM_T_82544:
   3943 	case WM_T_82540:
   3944 	case WM_T_82545:
   3945 	case WM_T_82545_3:
   3946 	case WM_T_82546:
   3947 	case WM_T_82546_3:
   3948 	case WM_T_82541:
   3949 	case WM_T_82541_2:
   3950 	case WM_T_82547:
   3951 	case WM_T_82547_2:
   3952 	case WM_T_82573:
   3953 	case WM_T_82574:
   3954 	case WM_T_82583:
   3955 		/* generic */
   3956 		delay(10*1000);
   3957 		break;
   3958 	case WM_T_80003:
   3959 	case WM_T_82571:
   3960 	case WM_T_82572:
   3961 	case WM_T_82575:
   3962 	case WM_T_82576:
   3963 	case WM_T_82580:
   3964 	case WM_T_I350:
   3965 	case WM_T_I354:
   3966 	case WM_T_I210:
   3967 	case WM_T_I211:
   3968 		if (sc->sc_type == WM_T_82571) {
    3969 			/* All 82571 ports share the port 0 CFGDONE bit */
   3970 			mask = EEMNGCTL_CFGDONE_0;
   3971 		} else
   3972 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3973 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3974 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3975 				break;
   3976 			delay(1000);
   3977 		}
   3978 		if (i >= WM_PHY_CFG_TIMEOUT)
   3979 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3980 				device_xname(sc->sc_dev), __func__));
   3981 		break;
   3982 	case WM_T_ICH8:
   3983 	case WM_T_ICH9:
   3984 	case WM_T_ICH10:
   3985 	case WM_T_PCH:
   3986 	case WM_T_PCH2:
   3987 	case WM_T_PCH_LPT:
   3988 	case WM_T_PCH_SPT:
   3989 	case WM_T_PCH_CNP:
   3990 		delay(10*1000);
   3991 		if (sc->sc_type >= WM_T_ICH10)
   3992 			wm_lan_init_done(sc);
   3993 		else
   3994 			wm_get_auto_rd_done(sc);
   3995 
   3996 		/* Clear PHY Reset Asserted bit */
   3997 		reg = CSR_READ(sc, WMREG_STATUS);
   3998 		if ((reg & STATUS_PHYRA) != 0)
   3999 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4000 		break;
   4001 	default:
   4002 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4003 		    __func__);
   4004 		break;
   4005 	}
   4006 }
   4007 
   4008 int
   4009 wm_phy_post_reset(struct wm_softc *sc)
   4010 {
   4011 	uint16_t reg;
   4012 	int rv = 0;
   4013 
   4014 	/* This function is only for ICH8 and newer. */
   4015 	if (sc->sc_type < WM_T_ICH8)
   4016 		return 0;
   4017 
   4018 	if (wm_phy_resetisblocked(sc)) {
   4019 		/* XXX */
   4020 		device_printf(sc->sc_dev, "PHY is blocked\n");
   4021 		return -1;
   4022 	}
   4023 
   4024 	/* Allow time for h/w to get to quiescent state after reset */
   4025 	delay(10*1000);
   4026 
   4027 	/* Perform any necessary post-reset workarounds */
   4028 	if (sc->sc_type == WM_T_PCH)
   4029 		rv = wm_hv_phy_workarounds_ich8lan(sc);
   4030 	else if (sc->sc_type == WM_T_PCH2)
   4031 		rv = wm_lv_phy_workarounds_ich8lan(sc);
   4032 	if (rv != 0)
   4033 		return rv;
   4034 
   4035 	/* Clear the host wakeup bit after lcd reset */
   4036 	if (sc->sc_type >= WM_T_PCH) {
   4037 		wm_gmii_hv_readreg(sc->sc_dev, 2, BM_PORT_GEN_CFG, &reg);
   4038 		reg &= ~BM_WUC_HOST_WU_BIT;
   4039 		wm_gmii_hv_writereg(sc->sc_dev, 2, BM_PORT_GEN_CFG, reg);
   4040 	}
   4041 
   4042 	/* Configure the LCD with the extended configuration region in NVM */
   4043 	if ((rv = wm_init_lcd_from_nvm(sc)) != 0)
   4044 		return rv;
   4045 
   4046 	/* Configure the LCD with the OEM bits in NVM */
   4047 	rv = wm_oem_bits_config_ich8lan(sc, true);
   4048 
   4049 	if (sc->sc_type == WM_T_PCH2) {
   4050 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4051 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4052 			delay(10 * 1000);
   4053 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4054 		}
   4055 		/* XXX Set EEE LPI Update Timer to 200usec */
   4056 	}
   4057 
   4058 	return rv;
   4059 }
   4060 
   4061 /* Only for PCH and newer */
   4062 static int
   4063 wm_write_smbus_addr(struct wm_softc *sc)
   4064 {
   4065 	uint32_t strap, freq;
   4066 	uint16_t phy_data;
   4067 	int rv;
   4068 
   4069 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4070 		device_xname(sc->sc_dev), __func__));
   4071 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4072 
   4073 	strap = CSR_READ(sc, WMREG_STRAP);
   4074 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4075 
   4076 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4077 	if (rv != 0)
   4078 		return -1;
   4079 
   4080 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4081 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4082 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4083 
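         	/*
         	 * A zero STRAP_FREQ field means no SMBus frequency is strapped;
         	 * otherwise bits 0 and 1 of (freq - 1) load the PHY's FREQ_LOW
         	 * and FREQ_HIGH bits below.
         	 */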
   4084 	if (sc->sc_phytype == WMPHY_I217) {
   4085 		/* Restore SMBus frequency */
    4086 		if (freq--) {
   4087 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4088 			    | HV_SMB_ADDR_FREQ_HIGH);
   4089 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4090 			    HV_SMB_ADDR_FREQ_LOW);
   4091 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4092 			    HV_SMB_ADDR_FREQ_HIGH);
   4093 		} else
   4094 			DPRINTF(WM_DEBUG_INIT,
   4095 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   4096 				device_xname(sc->sc_dev), __func__));
   4097 	}
   4098 
   4099 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4100 	    phy_data);
   4101 }
   4102 
   4103 static int
   4104 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4105 {
   4106 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4107 	uint16_t phy_page = 0;
   4108 	int rv = 0;
   4109 
   4110 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4111 		device_xname(sc->sc_dev), __func__));
   4112 
   4113 	switch (sc->sc_type) {
   4114 	case WM_T_ICH8:
   4115 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4116 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4117 			return 0;
   4118 
   4119 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4120 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4121 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4122 			break;
   4123 		}
   4124 		/* FALLTHROUGH */
   4125 	case WM_T_PCH:
   4126 	case WM_T_PCH2:
   4127 	case WM_T_PCH_LPT:
   4128 	case WM_T_PCH_SPT:
   4129 	case WM_T_PCH_CNP:
   4130 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4131 		break;
   4132 	default:
   4133 		return 0;
   4134 	}
   4135 
   4136 	if ((rv = sc->phy.acquire(sc)) != 0)
   4137 		return rv;
   4138 
   4139 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4140 	if ((reg & sw_cfg_mask) == 0)
   4141 		goto release;
   4142 
   4143 	/*
   4144 	 * Make sure HW does not configure LCD from PHY extended configuration
   4145 	 * before SW configuration
   4146 	 */
   4147 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4148 	if ((sc->sc_type < WM_T_PCH2)
   4149 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4150 		goto release;
   4151 
   4152 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4153 		device_xname(sc->sc_dev), __func__));
    4154 	/* The pointer is in DWORD units; << 1 converts it to a word address. */
   4155 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4156 
   4157 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4158 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4159 	if (cnf_size == 0)
   4160 		goto release;
   4161 
   4162 	if (((sc->sc_type == WM_T_PCH)
   4163 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4164 	    || (sc->sc_type > WM_T_PCH)) {
   4165 		/*
   4166 		 * HW configures the SMBus address and LEDs when the OEM and
   4167 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4168 		 * are cleared, SW will configure them instead.
   4169 		 */
   4170 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4171 			device_xname(sc->sc_dev), __func__));
   4172 		if ((rv = wm_write_smbus_addr(sc)) != 0)
   4173 			goto release;
   4174 
   4175 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4176 		rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG,
   4177 		    (uint16_t)reg);
   4178 		if (rv != 0)
   4179 			goto release;
   4180 	}
   4181 
   4182 	/* Configure LCD from extended configuration region. */
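         	/*
         	 * Each entry is a pair of NVM words: the register data followed
         	 * by the register address. MII_IGPHY_PAGE_SELECT writes update
         	 * the page applied to subsequent register addresses.
         	 */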
   4183 	for (i = 0; i < cnf_size; i++) {
   4184 		uint16_t reg_data, reg_addr;
   4185 
   4186 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4187 			goto release;
   4188 
   4189 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4190 			goto release;
   4191 
   4192 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4193 			phy_page = reg_data;
   4194 
   4195 		reg_addr &= IGPHY_MAXREGADDR;
   4196 		reg_addr |= phy_page;
   4197 
   4198 		KASSERT(sc->phy.writereg_locked != NULL);
   4199 		rv = sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr,
   4200 		    reg_data);
   4201 	}
   4202 
   4203 release:
   4204 	sc->phy.release(sc);
   4205 	return rv;
   4206 }
   4207 
   4208 /*
   4209  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4210  *  @sc:       pointer to the HW structure
   4211  *  @d0_state: boolean if entering d0 or d3 device state
   4212  *
   4213  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4214  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4215  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4216  */
   4217 int
   4218 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4219 {
   4220 	uint32_t mac_reg;
   4221 	uint16_t oem_reg;
   4222 	int rv;
   4223 
   4224 	if (sc->sc_type < WM_T_PCH)
   4225 		return 0;
   4226 
   4227 	rv = sc->phy.acquire(sc);
   4228 	if (rv != 0)
   4229 		return rv;
   4230 
   4231 	if (sc->sc_type == WM_T_PCH) {
   4232 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4233 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4234 			goto release;
   4235 	}
   4236 
   4237 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4238 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4239 		goto release;
   4240 
   4241 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4242 
   4243 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4244 	if (rv != 0)
   4245 		goto release;
   4246 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4247 
   4248 	if (d0_state) {
   4249 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4250 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4251 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4252 			oem_reg |= HV_OEM_BITS_LPLU;
   4253 	} else {
   4254 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4255 		    != 0)
   4256 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4257 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4258 		    != 0)
   4259 			oem_reg |= HV_OEM_BITS_LPLU;
   4260 	}
   4261 
   4262 	/* Set Restart auto-neg to activate the bits */
   4263 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4264 	    && (wm_phy_resetisblocked(sc) == false))
   4265 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4266 
   4267 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4268 
   4269 release:
   4270 	sc->phy.release(sc);
   4271 
   4272 	return rv;
   4273 }
   4274 
   4275 /* Init hardware bits */
   4276 void
   4277 wm_initialize_hardware_bits(struct wm_softc *sc)
   4278 {
   4279 	uint32_t tarc0, tarc1, reg;
   4280 
   4281 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4282 		device_xname(sc->sc_dev), __func__));
   4283 
   4284 	/* For 82571 variant, 80003 and ICHs */
   4285 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4286 	    || (sc->sc_type >= WM_T_80003)) {
   4287 
   4288 		/* Transmit Descriptor Control 0 */
   4289 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4290 		reg |= TXDCTL_COUNT_DESC;
   4291 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4292 
   4293 		/* Transmit Descriptor Control 1 */
   4294 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4295 		reg |= TXDCTL_COUNT_DESC;
   4296 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4297 
   4298 		/* TARC0 */
   4299 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4300 		switch (sc->sc_type) {
   4301 		case WM_T_82571:
   4302 		case WM_T_82572:
   4303 		case WM_T_82573:
   4304 		case WM_T_82574:
   4305 		case WM_T_82583:
   4306 		case WM_T_80003:
   4307 			/* Clear bits 30..27 */
   4308 			tarc0 &= ~__BITS(30, 27);
   4309 			break;
   4310 		default:
   4311 			break;
   4312 		}
   4313 
   4314 		switch (sc->sc_type) {
   4315 		case WM_T_82571:
   4316 		case WM_T_82572:
   4317 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4318 
   4319 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4320 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4321 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4322 			/* 8257[12] Errata No.7 */
    4323 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4324 
   4325 			/* TARC1 bit 28 */
   4326 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4327 				tarc1 &= ~__BIT(28);
   4328 			else
   4329 				tarc1 |= __BIT(28);
   4330 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4331 
   4332 			/*
   4333 			 * 8257[12] Errata No.13
    4334 			 * Disable Dynamic Clock Gating.
   4335 			 */
   4336 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4337 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4338 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4339 			break;
   4340 		case WM_T_82573:
   4341 		case WM_T_82574:
   4342 		case WM_T_82583:
   4343 			if ((sc->sc_type == WM_T_82574)
   4344 			    || (sc->sc_type == WM_T_82583))
   4345 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4346 
   4347 			/* Extended Device Control */
   4348 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4349 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4350 			reg |= __BIT(22);	/* Set bit 22 */
   4351 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4352 
   4353 			/* Device Control */
   4354 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4355 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4356 
   4357 			/* PCIe Control Register */
   4358 			/*
   4359 			 * 82573 Errata (unknown).
   4360 			 *
   4361 			 * 82574 Errata 25 and 82583 Errata 12
   4362 			 * "Dropped Rx Packets":
    4363 			 *   NVM image version 2.1.4 and newer does not have this bug.
   4364 			 */
   4365 			reg = CSR_READ(sc, WMREG_GCR);
   4366 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4367 			CSR_WRITE(sc, WMREG_GCR, reg);
   4368 
   4369 			if ((sc->sc_type == WM_T_82574)
   4370 			    || (sc->sc_type == WM_T_82583)) {
   4371 				/*
   4372 				 * Document says this bit must be set for
   4373 				 * proper operation.
   4374 				 */
   4375 				reg = CSR_READ(sc, WMREG_GCR);
   4376 				reg |= __BIT(22);
   4377 				CSR_WRITE(sc, WMREG_GCR, reg);
   4378 
   4379 				/*
    4380 				 * Apply the workaround for the hardware
    4381 				 * erratum documented in the errata docs: it
    4382 				 * fixes error-prone or unreliable PCIe
    4383 				 * completions, which occur particularly with
    4384 				 * ASPM enabled. Without the fix, the issue
    4385 				 * can cause Tx timeouts.
   4386 				 */
   4387 				reg = CSR_READ(sc, WMREG_GCR2);
   4388 				reg |= __BIT(0);
   4389 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4390 			}
   4391 			break;
   4392 		case WM_T_80003:
   4393 			/* TARC0 */
   4394 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4395 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4396 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4397 
   4398 			/* TARC1 bit 28 */
   4399 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4400 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4401 				tarc1 &= ~__BIT(28);
   4402 			else
   4403 				tarc1 |= __BIT(28);
   4404 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4405 			break;
   4406 		case WM_T_ICH8:
   4407 		case WM_T_ICH9:
   4408 		case WM_T_ICH10:
   4409 		case WM_T_PCH:
   4410 		case WM_T_PCH2:
   4411 		case WM_T_PCH_LPT:
   4412 		case WM_T_PCH_SPT:
   4413 		case WM_T_PCH_CNP:
   4414 			/* TARC0 */
   4415 			if (sc->sc_type == WM_T_ICH8) {
   4416 				/* Set TARC0 bits 29 and 28 */
   4417 				tarc0 |= __BITS(29, 28);
   4418 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4419 				tarc0 |= __BIT(29);
   4420 				/*
   4421 				 *  Drop bit 28. From Linux.
   4422 				 * See I218/I219 spec update
   4423 				 * "5. Buffer Overrun While the I219 is
   4424 				 * Processing DMA Transactions"
   4425 				 */
   4426 				tarc0 &= ~__BIT(28);
   4427 			}
   4428 			/* Set TARC0 bits 23,24,26,27 */
   4429 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4430 
   4431 			/* CTRL_EXT */
   4432 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4433 			reg |= __BIT(22);	/* Set bit 22 */
   4434 			/*
   4435 			 * Enable PHY low-power state when MAC is at D3
   4436 			 * w/o WoL
   4437 			 */
   4438 			if (sc->sc_type >= WM_T_PCH)
   4439 				reg |= CTRL_EXT_PHYPDEN;
   4440 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4441 
   4442 			/* TARC1 */
   4443 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4444 			/* bit 28 */
   4445 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4446 				tarc1 &= ~__BIT(28);
   4447 			else
   4448 				tarc1 |= __BIT(28);
   4449 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4450 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4451 
   4452 			/* Device Status */
   4453 			if (sc->sc_type == WM_T_ICH8) {
   4454 				reg = CSR_READ(sc, WMREG_STATUS);
   4455 				reg &= ~__BIT(31);
   4456 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4457 
   4458 			}
   4459 
   4460 			/* IOSFPC */
   4461 			if (sc->sc_type == WM_T_PCH_SPT) {
   4462 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4463 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4464 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4465 			}
   4466 			/*
   4467 			 * Work-around descriptor data corruption issue during
   4468 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4469 			 * capability.
   4470 			 */
   4471 			reg = CSR_READ(sc, WMREG_RFCTL);
   4472 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4473 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4474 			break;
   4475 		default:
   4476 			break;
   4477 		}
   4478 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4479 
   4480 		switch (sc->sc_type) {
   4481 		/*
   4482 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4483 		 * Avoid RSS Hash Value bug.
   4484 		 */
   4485 		case WM_T_82571:
   4486 		case WM_T_82572:
   4487 		case WM_T_82573:
   4488 		case WM_T_80003:
   4489 		case WM_T_ICH8:
   4490 			reg = CSR_READ(sc, WMREG_RFCTL);
   4491 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4492 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4493 			break;
   4494 		case WM_T_82574:
   4495 			/* Use extended Rx descriptors. */
   4496 			reg = CSR_READ(sc, WMREG_RFCTL);
   4497 			reg |= WMREG_RFCTL_EXSTEN;
   4498 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4499 			break;
   4500 		default:
   4501 			break;
   4502 		}
   4503 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4504 		/*
   4505 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4506 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4507 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4508 		 * Correctly by the Device"
   4509 		 *
   4510 		 * I354(C2000) Errata AVR53:
   4511 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4512 		 * Hang"
   4513 		 */
   4514 		reg = CSR_READ(sc, WMREG_RFCTL);
   4515 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4516 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4517 	}
   4518 }
   4519 
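        /*
         * wm_rxpbs_adjust_82580:
         *
         *	Translate the packet buffer size field read from the RXPBS
         *	register via a lookup table; values outside the table yield 0.
         */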
   4520 static uint32_t
   4521 wm_rxpbs_adjust_82580(uint32_t val)
   4522 {
   4523 	uint32_t rv = 0;
   4524 
   4525 	if (val < __arraycount(wm_82580_rxpbs_table))
   4526 		rv = wm_82580_rxpbs_table[val];
   4527 
   4528 	return rv;
   4529 }
   4530 
   4531 /*
   4532  * wm_reset_phy:
   4533  *
   4534  *	Generic PHY reset function.
   4535  *	Same as e1000_phy_hw_reset_generic().
   4536  */
   4537 static int
   4538 wm_reset_phy(struct wm_softc *sc)
   4539 {
   4540 	uint32_t reg;
   4541 
   4542 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4543 		device_xname(sc->sc_dev), __func__));
   4544 	if (wm_phy_resetisblocked(sc))
   4545 		return -1;
   4546 
   4547 	sc->phy.acquire(sc);
   4548 
   4549 	reg = CSR_READ(sc, WMREG_CTRL);
   4550 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4551 	CSR_WRITE_FLUSH(sc);
   4552 
   4553 	delay(sc->phy.reset_delay_us);
   4554 
   4555 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4556 	CSR_WRITE_FLUSH(sc);
   4557 
   4558 	delay(150);
   4559 
   4560 	sc->phy.release(sc);
   4561 
   4562 	wm_get_cfg_done(sc);
   4563 	wm_phy_post_reset(sc);
   4564 
   4565 	return 0;
   4566 }
   4567 
   4568 /*
   4569  * Only used by WM_T_PCH_SPT, which does not use multiqueue,
   4570  * so it suffices to check sc->sc_queue[0] only.
   4571  */
   4572 static void
   4573 wm_flush_desc_rings(struct wm_softc *sc)
   4574 {
   4575 	pcireg_t preg;
   4576 	uint32_t reg;
   4577 	struct wm_txqueue *txq;
   4578 	wiseman_txdesc_t *txd;
   4579 	int nexttx;
   4580 	uint32_t rctl;
   4581 
   4582 	/* First, disable MULR fix in FEXTNVM11 */
   4583 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4584 	reg |= FEXTNVM11_DIS_MULRFIX;
   4585 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4586 
   4587 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4588 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4589 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4590 		return;
   4591 
   4592 	/* TX */
   4593 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4594 	    device_xname(sc->sc_dev), preg, reg);
   4595 	reg = CSR_READ(sc, WMREG_TCTL);
   4596 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4597 
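        	/*
        	 * Queue a single dummy 512-byte TX descriptor and advance the
        	 * tail pointer so that the hardware completes the requested
        	 * TX flush.
        	 */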
   4598 	txq = &sc->sc_queue[0].wmq_txq;
   4599 	nexttx = txq->txq_next;
   4600 	txd = &txq->txq_descs[nexttx];
   4601 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4602 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4603 	txd->wtx_fields.wtxu_status = 0;
   4604 	txd->wtx_fields.wtxu_options = 0;
   4605 	txd->wtx_fields.wtxu_vlan = 0;
   4606 
   4607 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4608 	    BUS_SPACE_BARRIER_WRITE);
   4609 
   4610 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4611 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4612 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4613 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4614 	delay(250);
   4615 
   4616 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4617 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4618 		return;
   4619 
   4620 	/* RX */
   4621 	printf("%s: Need RX flush (reg = %08x)\n",
   4622 	    device_xname(sc->sc_dev), preg);
   4623 	rctl = CSR_READ(sc, WMREG_RCTL);
   4624 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4625 	CSR_WRITE_FLUSH(sc);
   4626 	delay(150);
   4627 
   4628 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4629 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4630 	reg &= 0xffffc000;
   4631 	/*
   4632 	 * Update the thresholds: set the prefetch threshold to 31 and
   4633 	 * the host threshold to 1, and make sure the granularity is
   4634 	 * "descriptors", not "cache lines".
   4635 	 */
   4636 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4637 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4638 
   4639 	/*
   4640 	 * momentarily enable the RX ring for the changes to take
   4641 	 * effect
   4642 	 */
   4643 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4644 	CSR_WRITE_FLUSH(sc);
   4645 	delay(150);
   4646 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4647 }
   4648 
   4649 /*
   4650  * wm_reset:
   4651  *
   4652  *	Reset the i82542 chip.
   4653  */
   4654 static void
   4655 wm_reset(struct wm_softc *sc)
   4656 {
   4657 	int phy_reset = 0;
   4658 	int i, error = 0;
   4659 	uint32_t reg;
   4660 	uint16_t kmreg;
   4661 	int rv;
   4662 
   4663 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4664 		device_xname(sc->sc_dev), __func__));
   4665 	KASSERT(sc->sc_type != 0);
   4666 
   4667 	/*
   4668 	 * Allocate on-chip memory according to the MTU size.
   4669 	 * The Packet Buffer Allocation register must be written
   4670 	 * before the chip is reset.
   4671 	 */
   4672 	switch (sc->sc_type) {
   4673 	case WM_T_82547:
   4674 	case WM_T_82547_2:
   4675 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4676 		    PBA_22K : PBA_30K;
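        		/*
        		 * The rest of the 40KB on-chip packet buffer above the
        		 * RX allocation becomes the TX FIFO used by the 82547
        		 * Tx FIFO stall workaround (see the txq_fifo_* fields
        		 * and the stall check timer in wm_stop_locked()).
        		 */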
   4677 		for (i = 0; i < sc->sc_nqueues; i++) {
   4678 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4679 			txq->txq_fifo_head = 0;
   4680 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4681 			txq->txq_fifo_size =
   4682 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4683 			txq->txq_fifo_stall = 0;
   4684 		}
   4685 		break;
   4686 	case WM_T_82571:
   4687 	case WM_T_82572:
   4688 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4689 	case WM_T_80003:
   4690 		sc->sc_pba = PBA_32K;
   4691 		break;
   4692 	case WM_T_82573:
   4693 		sc->sc_pba = PBA_12K;
   4694 		break;
   4695 	case WM_T_82574:
   4696 	case WM_T_82583:
   4697 		sc->sc_pba = PBA_20K;
   4698 		break;
   4699 	case WM_T_82576:
   4700 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4701 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4702 		break;
   4703 	case WM_T_82580:
   4704 	case WM_T_I350:
   4705 	case WM_T_I354:
   4706 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4707 		break;
   4708 	case WM_T_I210:
   4709 	case WM_T_I211:
   4710 		sc->sc_pba = PBA_34K;
   4711 		break;
   4712 	case WM_T_ICH8:
   4713 		/* Workaround for a bit corruption issue in FIFO memory */
   4714 		sc->sc_pba = PBA_8K;
   4715 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4716 		break;
   4717 	case WM_T_ICH9:
   4718 	case WM_T_ICH10:
   4719 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4720 		    PBA_14K : PBA_10K;
   4721 		break;
   4722 	case WM_T_PCH:
   4723 	case WM_T_PCH2:	/* XXX 14K? */
   4724 	case WM_T_PCH_LPT:
   4725 	case WM_T_PCH_SPT:
   4726 	case WM_T_PCH_CNP:
   4727 		sc->sc_pba = PBA_26K;
   4728 		break;
   4729 	default:
   4730 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4731 		    PBA_40K : PBA_48K;
   4732 		break;
   4733 	}
   4734 	/*
   4735 	 * Only old or non-multiqueue devices have the PBA register.
   4736 	 * XXX Need special handling for the 82575.
   4737 	 */
   4738 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4739 	    || (sc->sc_type == WM_T_82575))
   4740 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4741 
   4742 	/* Prevent the PCI-E bus from sticking */
   4743 	if (sc->sc_flags & WM_F_PCIE) {
   4744 		int timeout = 800;
   4745 
   4746 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4747 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4748 
   4749 		while (timeout--) {
   4750 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4751 			    == 0)
   4752 				break;
   4753 			delay(100);
   4754 		}
   4755 		if (timeout == 0)
   4756 			device_printf(sc->sc_dev,
   4757 			    "failed to disable busmastering\n");
   4758 	}
   4759 
   4760 	/* Set the completion timeout for the interface */
   4761 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4762 	    || (sc->sc_type == WM_T_82580)
   4763 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4764 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4765 		wm_set_pcie_completion_timeout(sc);
   4766 
   4767 	/* Clear interrupt */
   4768 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4769 	if (wm_is_using_msix(sc)) {
   4770 		if (sc->sc_type != WM_T_82574) {
   4771 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4772 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4773 		} else
   4774 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4775 	}
   4776 
   4777 	/* Stop the transmit and receive processes. */
   4778 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4779 	sc->sc_rctl &= ~RCTL_EN;
   4780 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4781 	CSR_WRITE_FLUSH(sc);
   4782 
   4783 	/* XXX set_tbi_sbp_82543() */
   4784 
   4785 	delay(10*1000);
   4786 
   4787 	/* Must acquire the MDIO ownership before MAC reset */
   4788 	switch (sc->sc_type) {
   4789 	case WM_T_82573:
   4790 	case WM_T_82574:
   4791 	case WM_T_82583:
   4792 		error = wm_get_hw_semaphore_82573(sc);
   4793 		break;
   4794 	default:
   4795 		break;
   4796 	}
   4797 
   4798 	/*
   4799 	 * 82541 Errata 29? & 82547 Errata 28?
   4800 	 * See also the description of the PHY_RST bit in the CTRL
   4801 	 * register in 8254x_GBe_SDM.pdf.
   4802 	 */
   4803 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4804 		CSR_WRITE(sc, WMREG_CTRL,
   4805 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4806 		CSR_WRITE_FLUSH(sc);
   4807 		delay(5000);
   4808 	}
   4809 
   4810 	switch (sc->sc_type) {
   4811 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4812 	case WM_T_82541:
   4813 	case WM_T_82541_2:
   4814 	case WM_T_82547:
   4815 	case WM_T_82547_2:
   4816 		/*
   4817 		 * On some chipsets, a reset through a memory-mapped write
   4818 		 * cycle can cause the chip to reset before completing the
   4819 		 * write cycle. This causes major headaches that can be avoided
   4820 		 * by issuing the reset via indirect register writes through
   4821 		 * I/O space.
   4822 		 *
   4823 		 * So, if we successfully mapped the I/O BAR at attach time,
   4824 		 * use that. Otherwise, try our luck with a memory-mapped
   4825 		 * reset.
   4826 		 */
   4827 		if (sc->sc_flags & WM_F_IOH_VALID)
   4828 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4829 		else
   4830 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4831 		break;
   4832 	case WM_T_82545_3:
   4833 	case WM_T_82546_3:
   4834 		/* Use the shadow control register on these chips. */
   4835 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4836 		break;
   4837 	case WM_T_80003:
   4838 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4839 		sc->phy.acquire(sc);
   4840 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4841 		sc->phy.release(sc);
   4842 		break;
   4843 	case WM_T_ICH8:
   4844 	case WM_T_ICH9:
   4845 	case WM_T_ICH10:
   4846 	case WM_T_PCH:
   4847 	case WM_T_PCH2:
   4848 	case WM_T_PCH_LPT:
   4849 	case WM_T_PCH_SPT:
   4850 	case WM_T_PCH_CNP:
   4851 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4852 		if (wm_phy_resetisblocked(sc) == false) {
   4853 			/*
   4854 			 * Gate automatic PHY configuration by hardware on
   4855 			 * non-managed 82579
   4856 			 */
   4857 			if ((sc->sc_type == WM_T_PCH2)
   4858 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4859 				== 0))
   4860 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4861 
   4862 			reg |= CTRL_PHY_RESET;
   4863 			phy_reset = 1;
   4864 		} else
   4865 			printf("XXX reset is blocked!!!\n");
   4866 		sc->phy.acquire(sc);
   4867 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4868 		/* Don't insert a completion barrier during reset */
   4869 		delay(20*1000);
   4870 		mutex_exit(sc->sc_ich_phymtx);
   4871 		break;
   4872 	case WM_T_82580:
   4873 	case WM_T_I350:
   4874 	case WM_T_I354:
   4875 	case WM_T_I210:
   4876 	case WM_T_I211:
   4877 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4878 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4879 			CSR_WRITE_FLUSH(sc);
   4880 		delay(5000);
   4881 		break;
   4882 	case WM_T_82542_2_0:
   4883 	case WM_T_82542_2_1:
   4884 	case WM_T_82543:
   4885 	case WM_T_82540:
   4886 	case WM_T_82545:
   4887 	case WM_T_82546:
   4888 	case WM_T_82571:
   4889 	case WM_T_82572:
   4890 	case WM_T_82573:
   4891 	case WM_T_82574:
   4892 	case WM_T_82575:
   4893 	case WM_T_82576:
   4894 	case WM_T_82583:
   4895 	default:
   4896 		/* Everything else can safely use the documented method. */
   4897 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4898 		break;
   4899 	}
   4900 
   4901 	/* Must release the MDIO ownership after MAC reset */
   4902 	switch (sc->sc_type) {
   4903 	case WM_T_82573:
   4904 	case WM_T_82574:
   4905 	case WM_T_82583:
   4906 		if (error == 0)
   4907 			wm_put_hw_semaphore_82573(sc);
   4908 		break;
   4909 	default:
   4910 		break;
   4911 	}
   4912 
   4913 	/* Set Phy Config Counter to 50msec */
   4914 	if (sc->sc_type == WM_T_PCH2) {
   4915 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4916 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4917 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4918 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4919 	}
   4920 
   4921 	if (phy_reset != 0)
   4922 		wm_get_cfg_done(sc);
   4923 
   4924 	/* reload EEPROM */
   4925 	switch (sc->sc_type) {
   4926 	case WM_T_82542_2_0:
   4927 	case WM_T_82542_2_1:
   4928 	case WM_T_82543:
   4929 	case WM_T_82544:
   4930 		delay(10);
   4931 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4932 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4933 		CSR_WRITE_FLUSH(sc);
   4934 		delay(2000);
   4935 		break;
   4936 	case WM_T_82540:
   4937 	case WM_T_82545:
   4938 	case WM_T_82545_3:
   4939 	case WM_T_82546:
   4940 	case WM_T_82546_3:
   4941 		delay(5*1000);
   4942 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4943 		break;
   4944 	case WM_T_82541:
   4945 	case WM_T_82541_2:
   4946 	case WM_T_82547:
   4947 	case WM_T_82547_2:
   4948 		delay(20000);
   4949 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4950 		break;
   4951 	case WM_T_82571:
   4952 	case WM_T_82572:
   4953 	case WM_T_82573:
   4954 	case WM_T_82574:
   4955 	case WM_T_82583:
   4956 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4957 			delay(10);
   4958 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4959 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4960 			CSR_WRITE_FLUSH(sc);
   4961 		}
   4962 		/* check EECD_EE_AUTORD */
   4963 		wm_get_auto_rd_done(sc);
   4964 		/*
   4965 		 * PHY configuration from the NVM starts just after
   4966 		 * EECD_AUTO_RD is set.
   4967 		 */
   4968 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4969 		    || (sc->sc_type == WM_T_82583))
   4970 			delay(25*1000);
   4971 		break;
   4972 	case WM_T_82575:
   4973 	case WM_T_82576:
   4974 	case WM_T_82580:
   4975 	case WM_T_I350:
   4976 	case WM_T_I354:
   4977 	case WM_T_I210:
   4978 	case WM_T_I211:
   4979 	case WM_T_80003:
   4980 		/* check EECD_EE_AUTORD */
   4981 		wm_get_auto_rd_done(sc);
   4982 		break;
   4983 	case WM_T_ICH8:
   4984 	case WM_T_ICH9:
   4985 	case WM_T_ICH10:
   4986 	case WM_T_PCH:
   4987 	case WM_T_PCH2:
   4988 	case WM_T_PCH_LPT:
   4989 	case WM_T_PCH_SPT:
   4990 	case WM_T_PCH_CNP:
   4991 		break;
   4992 	default:
   4993 		panic("%s: unknown type\n", __func__);
   4994 	}
   4995 
   4996 	/* Check whether EEPROM is present or not */
   4997 	switch (sc->sc_type) {
   4998 	case WM_T_82575:
   4999 	case WM_T_82576:
   5000 	case WM_T_82580:
   5001 	case WM_T_I350:
   5002 	case WM_T_I354:
   5003 	case WM_T_ICH8:
   5004 	case WM_T_ICH9:
   5005 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   5006 			/* Not found */
   5007 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   5008 			if (sc->sc_type == WM_T_82575)
   5009 				wm_reset_init_script_82575(sc);
   5010 		}
   5011 		break;
   5012 	default:
   5013 		break;
   5014 	}
   5015 
   5016 	if (phy_reset != 0)
   5017 		wm_phy_post_reset(sc);
   5018 
   5019 	if ((sc->sc_type == WM_T_82580)
   5020 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5021 		/* clear global device reset status bit */
   5022 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5023 	}
   5024 
   5025 	/* Clear any pending interrupt events. */
   5026 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5027 	reg = CSR_READ(sc, WMREG_ICR);
   5028 	if (wm_is_using_msix(sc)) {
   5029 		if (sc->sc_type != WM_T_82574) {
   5030 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5031 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5032 		} else
   5033 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5034 	}
   5035 
   5036 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5037 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5038 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5039 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5040 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5041 		reg |= KABGTXD_BGSQLBIAS;
   5042 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5043 	}
   5044 
   5045 	/* reload sc_ctrl */
   5046 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5047 
   5048 	wm_set_eee(sc);
   5049 
   5050 	/*
   5051 	 * For PCH, this write makes sure that any noise is detected as
   5052 	 * a CRC error and dropped rather than presented to the DMA
   5053 	 * engine as a bad packet.
   5054 	 */
   5055 	if (sc->sc_type == WM_T_PCH)
   5056 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5057 
   5058 	if (sc->sc_type >= WM_T_82544)
   5059 		CSR_WRITE(sc, WMREG_WUC, 0);
   5060 
   5061 	if (sc->sc_type < WM_T_82575)
   5062 		wm_disable_aspm(sc); /* Workaround for some chips */
   5063 
   5064 	wm_reset_mdicnfg_82580(sc);
   5065 
   5066 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5067 		wm_pll_workaround_i210(sc);
   5068 
   5069 	if (sc->sc_type == WM_T_80003) {
   5070 		/* default to TRUE to enable the MDIC W/A */
   5071 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5072 
   5073 		rv = wm_kmrn_readreg(sc,
   5074 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5075 		if (rv == 0) {
   5076 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5077 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5078 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5079 			else
   5080 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5081 		}
   5082 	}
   5083 }
   5084 
   5085 /*
   5086  * wm_add_rxbuf:
   5087  *
   5088  *	Add a receive buffer to the indicated descriptor.
   5089  */
   5090 static int
   5091 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5092 {
   5093 	struct wm_softc *sc = rxq->rxq_sc;
   5094 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5095 	struct mbuf *m;
   5096 	int error;
   5097 
   5098 	KASSERT(mutex_owned(rxq->rxq_lock));
   5099 
   5100 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5101 	if (m == NULL)
   5102 		return ENOBUFS;
   5103 
   5104 	MCLGET(m, M_DONTWAIT);
   5105 	if ((m->m_flags & M_EXT) == 0) {
   5106 		m_freem(m);
   5107 		return ENOBUFS;
   5108 	}
   5109 
   5110 	if (rxs->rxs_mbuf != NULL)
   5111 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5112 
   5113 	rxs->rxs_mbuf = m;
   5114 
   5115 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5116 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5117 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5118 	if (error) {
   5119 		/* XXX XXX XXX */
   5120 		aprint_error_dev(sc->sc_dev,
   5121 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5122 		panic("wm_add_rxbuf");
   5123 	}
   5124 
   5125 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5126 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5127 
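        	/*
        	 * On NEWQUEUE chips, only write the RX descriptor while the
        	 * receiver is enabled; see the note about RCTL.EN ordering
        	 * above the wm_set_filter() call in wm_init_locked().
        	 */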
   5128 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5129 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5130 			wm_init_rxdesc(rxq, idx);
   5131 	} else
   5132 		wm_init_rxdesc(rxq, idx);
   5133 
   5134 	return 0;
   5135 }
   5136 
   5137 /*
   5138  * wm_rxdrain:
   5139  *
   5140  *	Drain the receive queue.
   5141  */
   5142 static void
   5143 wm_rxdrain(struct wm_rxqueue *rxq)
   5144 {
   5145 	struct wm_softc *sc = rxq->rxq_sc;
   5146 	struct wm_rxsoft *rxs;
   5147 	int i;
   5148 
   5149 	KASSERT(mutex_owned(rxq->rxq_lock));
   5150 
   5151 	for (i = 0; i < WM_NRXDESC; i++) {
   5152 		rxs = &rxq->rxq_soft[i];
   5153 		if (rxs->rxs_mbuf != NULL) {
   5154 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5155 			m_freem(rxs->rxs_mbuf);
   5156 			rxs->rxs_mbuf = NULL;
   5157 		}
   5158 	}
   5159 }
   5160 
   5161 /*
   5162  * Set up registers for RSS.
   5163  *
   5164  * XXX VMDq is not yet supported.
   5165  */
   5166 static void
   5167 wm_init_rss(struct wm_softc *sc)
   5168 {
   5169 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5170 	int i;
   5171 
   5172 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5173 
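        	/*
        	 * Spread the redirection table entries over the active queues
        	 * round-robin; e.g. with 4 queues, entry i selects queue
        	 * (i % 4).
        	 */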
   5174 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5175 		int qid, reta_ent;
   5176 
   5177 		qid  = i % sc->sc_nqueues;
   5178 		switch (sc->sc_type) {
   5179 		case WM_T_82574:
   5180 			reta_ent = __SHIFTIN(qid,
   5181 			    RETA_ENT_QINDEX_MASK_82574);
   5182 			break;
   5183 		case WM_T_82575:
   5184 			reta_ent = __SHIFTIN(qid,
   5185 			    RETA_ENT_QINDEX1_MASK_82575);
   5186 			break;
   5187 		default:
   5188 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5189 			break;
   5190 		}
   5191 
   5192 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5193 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5194 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5195 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5196 	}
   5197 
   5198 	rss_getkey((uint8_t *)rss_key);
   5199 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5200 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5201 
   5202 	if (sc->sc_type == WM_T_82574)
   5203 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5204 	else
   5205 		mrqc = MRQC_ENABLE_RSS_MQ;
   5206 
   5207 	/*
   5208 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5209 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5210 	 */
   5211 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5212 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5213 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5214 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5215 
   5216 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5217 }
   5218 
   5219 /*
   5220  * Adjust the TX and RX queue numbers which the system actually uses.
   5221  *
   5222  * The result is effectively the minimum of:
   5223  *     - The number of hardware queues
   5224  *     - The number of MSI-X vectors (= "nvectors" argument) minus one
   5225  *     - ncpu
   5226  */
   5227 static void
   5228 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5229 {
   5230 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5231 
   5232 	if (nvectors < 2) {
   5233 		sc->sc_nqueues = 1;
   5234 		return;
   5235 	}
   5236 
   5237 	switch (sc->sc_type) {
   5238 	case WM_T_82572:
   5239 		hw_ntxqueues = 2;
   5240 		hw_nrxqueues = 2;
   5241 		break;
   5242 	case WM_T_82574:
   5243 		hw_ntxqueues = 2;
   5244 		hw_nrxqueues = 2;
   5245 		break;
   5246 	case WM_T_82575:
   5247 		hw_ntxqueues = 4;
   5248 		hw_nrxqueues = 4;
   5249 		break;
   5250 	case WM_T_82576:
   5251 		hw_ntxqueues = 16;
   5252 		hw_nrxqueues = 16;
   5253 		break;
   5254 	case WM_T_82580:
   5255 	case WM_T_I350:
   5256 	case WM_T_I354:
   5257 		hw_ntxqueues = 8;
   5258 		hw_nrxqueues = 8;
   5259 		break;
   5260 	case WM_T_I210:
   5261 		hw_ntxqueues = 4;
   5262 		hw_nrxqueues = 4;
   5263 		break;
   5264 	case WM_T_I211:
   5265 		hw_ntxqueues = 2;
   5266 		hw_nrxqueues = 2;
   5267 		break;
   5268 		/*
   5269 		 * As the Ethernet controllers below do not support MSI-X,
   5270 		 * this driver does not use multiqueue on them.
   5271 		 *     - WM_T_80003
   5272 		 *     - WM_T_ICH8
   5273 		 *     - WM_T_ICH9
   5274 		 *     - WM_T_ICH10
   5275 		 *     - WM_T_PCH
   5276 		 *     - WM_T_PCH2
   5277 		 *     - WM_T_PCH_LPT
   5278 		 */
   5279 	default:
   5280 		hw_ntxqueues = 1;
   5281 		hw_nrxqueues = 1;
   5282 		break;
   5283 	}
   5284 
   5285 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5286 
   5287 	/*
   5288 	 * As more queues than MSI-X vectors cannot improve scaling, we
   5289 	 * limit the number of queues actually used.
   5290 	 */
   5291 	if (nvectors < hw_nqueues + 1)
   5292 		sc->sc_nqueues = nvectors - 1;
   5293 	else
   5294 		sc->sc_nqueues = hw_nqueues;
   5295 
   5296 	/*
   5297 	 * As more queues than CPUs cannot improve scaling, we limit
   5298 	 * the number of queues actually used.
   5299 	 */
   5300 	if (ncpu < sc->sc_nqueues)
   5301 		sc->sc_nqueues = ncpu;
   5302 }
   5303 
   5304 static inline bool
   5305 wm_is_using_msix(struct wm_softc *sc)
   5306 {
   5307 
   5308 	return (sc->sc_nintrs > 1);
   5309 }
   5310 
   5311 static inline bool
   5312 wm_is_using_multiqueue(struct wm_softc *sc)
   5313 {
   5314 
   5315 	return (sc->sc_nqueues > 1);
   5316 }
   5317 
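        /*
         * wm_softint_establish:
         *
         *	Establish the per-queue software interrupt which defers
         *	TX/RX processing (wm_handle_queue) out of hard interrupt
         *	context.
         */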
   5318 static int
   5319 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5320 {
   5321 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5322 	wmq->wmq_id = qidx;
   5323 	wmq->wmq_intr_idx = intr_idx;
   5324 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5325 #ifdef WM_MPSAFE
   5326 	    | SOFTINT_MPSAFE
   5327 #endif
   5328 	    , wm_handle_queue, wmq);
   5329 	if (wmq->wmq_si != NULL)
   5330 		return 0;
   5331 
   5332 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5333 	    wmq->wmq_id);
   5334 
   5335 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5336 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5337 	return ENOMEM;
   5338 }
   5339 
   5340 /*
   5341  * Both single-interrupt MSI and INTx can use this function.
   5342  */
   5343 static int
   5344 wm_setup_legacy(struct wm_softc *sc)
   5345 {
   5346 	pci_chipset_tag_t pc = sc->sc_pc;
   5347 	const char *intrstr = NULL;
   5348 	char intrbuf[PCI_INTRSTR_LEN];
   5349 	int error;
   5350 
   5351 	error = wm_alloc_txrx_queues(sc);
   5352 	if (error) {
   5353 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5354 		    error);
   5355 		return ENOMEM;
   5356 	}
   5357 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5358 	    sizeof(intrbuf));
   5359 #ifdef WM_MPSAFE
   5360 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5361 #endif
   5362 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5363 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5364 	if (sc->sc_ihs[0] == NULL) {
   5365 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5366 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5367 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5368 		return ENOMEM;
   5369 	}
   5370 
   5371 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5372 	sc->sc_nintrs = 1;
   5373 
   5374 	return wm_softint_establish(sc, 0, 0);
   5375 }
   5376 
   5377 static int
   5378 wm_setup_msix(struct wm_softc *sc)
   5379 {
   5380 	void *vih;
   5381 	kcpuset_t *affinity;
   5382 	int qidx, error, intr_idx, txrx_established;
   5383 	pci_chipset_tag_t pc = sc->sc_pc;
   5384 	const char *intrstr = NULL;
   5385 	char intrbuf[PCI_INTRSTR_LEN];
   5386 	char intr_xname[INTRDEVNAMEBUF];
   5387 
   5388 	if (sc->sc_nqueues < ncpu) {
   5389 		/*
   5390 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   5391 		 * interrupts starts from CPU#1.
   5392 		 */
   5393 		sc->sc_affinity_offset = 1;
   5394 	} else {
   5395 		/*
   5396 		 * In this case, this device uses all CPUs. For readability,
   5397 		 * unify the affinity cpu_index with the MSI-X vector number.
   5398 		 */
   5399 		sc->sc_affinity_offset = 0;
   5400 	}
   5401 
   5402 	error = wm_alloc_txrx_queues(sc);
   5403 	if (error) {
   5404 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5405 		    error);
   5406 		return ENOMEM;
   5407 	}
   5408 
   5409 	kcpuset_create(&affinity, false);
   5410 	intr_idx = 0;
   5411 
   5412 	/*
   5413 	 * TX and RX
   5414 	 */
   5415 	txrx_established = 0;
   5416 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5417 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5418 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5419 
   5420 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5421 		    sizeof(intrbuf));
   5422 #ifdef WM_MPSAFE
   5423 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5424 		    PCI_INTR_MPSAFE, true);
   5425 #endif
   5426 		memset(intr_xname, 0, sizeof(intr_xname));
   5427 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5428 		    device_xname(sc->sc_dev), qidx);
   5429 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5430 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5431 		if (vih == NULL) {
   5432 			aprint_error_dev(sc->sc_dev,
   5433 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5434 			    intrstr ? " at " : "",
   5435 			    intrstr ? intrstr : "");
   5436 
   5437 			goto fail;
   5438 		}
   5439 		kcpuset_zero(affinity);
   5440 		/* Round-robin affinity */
   5441 		kcpuset_set(affinity, affinity_to);
   5442 		error = interrupt_distribute(vih, affinity, NULL);
   5443 		if (error == 0) {
   5444 			aprint_normal_dev(sc->sc_dev,
   5445 			    "for TX and RX interrupting at %s affinity to %u\n",
   5446 			    intrstr, affinity_to);
   5447 		} else {
   5448 			aprint_normal_dev(sc->sc_dev,
   5449 			    "for TX and RX interrupting at %s\n", intrstr);
   5450 		}
   5451 		sc->sc_ihs[intr_idx] = vih;
   5452 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5453 			goto fail;
   5454 		txrx_established++;
   5455 		intr_idx++;
   5456 	}
   5457 
   5458 	/*
   5459 	 * LINK
   5460 	 */
   5461 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5462 	    sizeof(intrbuf));
   5463 #ifdef WM_MPSAFE
   5464 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5465 #endif
   5466 	memset(intr_xname, 0, sizeof(intr_xname));
   5467 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5468 	    device_xname(sc->sc_dev));
   5469 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5470 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5471 	if (vih == NULL) {
   5472 		aprint_error_dev(sc->sc_dev,
   5473 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5474 		    intrstr ? " at " : "",
   5475 		    intrstr ? intrstr : "");
   5476 
   5477 		goto fail;
   5478 	}
   5479 	/* Keep the default affinity for the LINK interrupt */
   5480 	aprint_normal_dev(sc->sc_dev,
   5481 	    "for LINK interrupting at %s\n", intrstr);
   5482 	sc->sc_ihs[intr_idx] = vih;
   5483 	sc->sc_link_intr_idx = intr_idx;
   5484 
   5485 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5486 	kcpuset_destroy(affinity);
   5487 	return 0;
   5488 
   5489  fail:
   5490 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5491 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5492 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5493 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5494 	}
   5495 
   5496 	kcpuset_destroy(affinity);
   5497 	return ENOMEM;
   5498 }
   5499 
   5500 static void
   5501 wm_unset_stopping_flags(struct wm_softc *sc)
   5502 {
   5503 	int i;
   5504 
   5505 	KASSERT(WM_CORE_LOCKED(sc));
   5506 
   5507 	/*
   5508 	 * Must unset the stopping flags in ascending order.
   5509 	 */
   5510 	for (i = 0; i < sc->sc_nqueues; i++) {
   5511 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5512 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5513 
   5514 		mutex_enter(txq->txq_lock);
   5515 		txq->txq_stopping = false;
   5516 		mutex_exit(txq->txq_lock);
   5517 
   5518 		mutex_enter(rxq->rxq_lock);
   5519 		rxq->rxq_stopping = false;
   5520 		mutex_exit(rxq->rxq_lock);
   5521 	}
   5522 
   5523 	sc->sc_core_stopping = false;
   5524 }
   5525 
   5526 static void
   5527 wm_set_stopping_flags(struct wm_softc *sc)
   5528 {
   5529 	int i;
   5530 
   5531 	KASSERT(WM_CORE_LOCKED(sc));
   5532 
   5533 	sc->sc_core_stopping = true;
   5534 
   5535 	/*
   5536 	 * Must set the stopping flags in ascending order.
   5537 	 */
   5538 	for (i = 0; i < sc->sc_nqueues; i++) {
   5539 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5540 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5541 
   5542 		mutex_enter(rxq->rxq_lock);
   5543 		rxq->rxq_stopping = true;
   5544 		mutex_exit(rxq->rxq_lock);
   5545 
   5546 		mutex_enter(txq->txq_lock);
   5547 		txq->txq_stopping = true;
   5548 		mutex_exit(txq->txq_lock);
   5549 	}
   5550 }
   5551 
   5552 /*
   5553  * Write the interrupt interval value to the ITR or EITR register.
   5554  */
   5555 static void
   5556 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5557 {
   5558 
   5559 	if (!wmq->wmq_set_itr)
   5560 		return;
   5561 
   5562 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5563 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5564 
   5565 		/*
   5566 		 * The 82575 doesn't have the CNT_INGR field,
   5567 		 * so overwrite the counter field in software.
   5568 		 */
   5569 		if (sc->sc_type == WM_T_82575)
   5570 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5571 		else
   5572 			eitr |= EITR_CNT_INGR;
   5573 
   5574 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5575 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5576 		/*
   5577 		 * The 82574 has both ITR and EITR. Set EITR when we use
   5578 		 * the multiqueue function with MSI-X.
   5579 		 */
   5580 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5581 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5582 	} else {
   5583 		KASSERT(wmq->wmq_id == 0);
   5584 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5585 	}
   5586 
   5587 	wmq->wmq_set_itr = false;
   5588 }
   5589 
   5590 /*
   5591  * TODO
   5592  * The dynamic calculation of itr below is almost the same as Linux
   5593  * igb's; however, it does not fit wm(4), so AIM stays disabled until
   5594  * we find an appropriate itr calculation.
   5595  */
   5596 /*
   5597  * Calculate the interrupt interval value to be written by
   5598  * wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5599  */
   5600 static void
   5601 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5602 {
   5603 #ifdef NOTYET
   5604 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5605 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5606 	uint32_t avg_size = 0;
   5607 	uint32_t new_itr;
   5608 
   5609 	if (rxq->rxq_packets)
   5610 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5611 	if (txq->txq_packets)
   5612 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5613 
   5614 	if (avg_size == 0) {
   5615 		new_itr = 450; /* restore default value */
   5616 		goto out;
   5617 	}
   5618 
   5619 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5620 	avg_size += 24;
   5621 
   5622 	/* Don't starve jumbo frames */
   5623 	avg_size = uimin(avg_size, 3000);
   5624 
   5625 	/* Give a little boost to mid-size frames */
   5626 	if ((avg_size > 300) && (avg_size < 1200))
   5627 		new_itr = avg_size / 3;
   5628 	else
   5629 		new_itr = avg_size / 2;
   5630 
   5631 out:
   5632 	/*
   5633 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   5634 	 * controllers'. See the sc->sc_itr_init setting in wm_init_locked().
   5635 	 */
   5636 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5637 		new_itr *= 4;
   5638 
   5639 	if (new_itr != wmq->wmq_itr) {
   5640 		wmq->wmq_itr = new_itr;
   5641 		wmq->wmq_set_itr = true;
   5642 	} else
   5643 		wmq->wmq_set_itr = false;
   5644 
   5645 	rxq->rxq_packets = 0;
   5646 	rxq->rxq_bytes = 0;
   5647 	txq->txq_packets = 0;
   5648 	txq->txq_bytes = 0;
   5649 #endif
   5650 }
   5651 
   5652 /*
   5653  * wm_init:		[ifnet interface function]
   5654  *
   5655  *	Initialize the interface.
   5656  */
   5657 static int
   5658 wm_init(struct ifnet *ifp)
   5659 {
   5660 	struct wm_softc *sc = ifp->if_softc;
   5661 	int ret;
   5662 
   5663 	WM_CORE_LOCK(sc);
   5664 	ret = wm_init_locked(ifp);
   5665 	WM_CORE_UNLOCK(sc);
   5666 
   5667 	return ret;
   5668 }
   5669 
   5670 static int
   5671 wm_init_locked(struct ifnet *ifp)
   5672 {
   5673 	struct wm_softc *sc = ifp->if_softc;
   5674 	struct ethercom *ec = &sc->sc_ethercom;
   5675 	int i, j, trynum, error = 0;
   5676 	uint32_t reg;
   5677 
   5678 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5679 		device_xname(sc->sc_dev), __func__));
   5680 	KASSERT(WM_CORE_LOCKED(sc));
   5681 
   5682 	/*
   5683 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
   5684 	 * There is a small but measurable benefit to avoiding the adjustment
   5685 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5686 	 * on such platforms.  One possibility is that the DMA itself is
   5687 	 * slightly more efficient if the front of the entire packet (instead
   5688 	 * of the front of the headers) is aligned.
   5689 	 *
   5690 	 * Note we must always set align_tweak to 0 if we are using
   5691 	 * jumbo frames.
   5692 	 */
   5693 #ifdef __NO_STRICT_ALIGNMENT
   5694 	sc->sc_align_tweak = 0;
   5695 #else
   5696 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5697 		sc->sc_align_tweak = 0;
   5698 	else
   5699 		sc->sc_align_tweak = 2;
   5700 #endif /* __NO_STRICT_ALIGNMENT */
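        	/*
        	 * The 2-byte tweak is the usual trick: it offsets the 14-byte
        	 * Ethernet header so that the IP header which follows it is
        	 * 4-byte aligned.
        	 */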
   5701 
   5702 	/* Cancel any pending I/O. */
   5703 	wm_stop_locked(ifp, 0);
   5704 
   5705 	/* update statistics before reset */
   5706 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5707 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5708 
   5709 	/* PCH_SPT hardware workaround */
   5710 	if (sc->sc_type == WM_T_PCH_SPT)
   5711 		wm_flush_desc_rings(sc);
   5712 
   5713 	/* Reset the chip to a known state. */
   5714 	wm_reset(sc);
   5715 
   5716 	/*
   5717 	 * AMT based hardware can now take control from firmware
   5718 	 * Do this after reset.
   5719 	 */
   5720 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5721 		wm_get_hw_control(sc);
   5722 
   5723 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5724 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5725 		wm_legacy_irq_quirk_spt(sc);
   5726 
   5727 	/* Init hardware bits */
   5728 	wm_initialize_hardware_bits(sc);
   5729 
   5730 	/* Reset the PHY. */
   5731 	if (sc->sc_flags & WM_F_HAS_MII)
   5732 		wm_gmii_reset(sc);
   5733 
   5734 	if (sc->sc_type >= WM_T_ICH8) {
   5735 		reg = CSR_READ(sc, WMREG_GCR);
   5736 		/*
   5737 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5738 		 * default after reset.
   5739 		 */
   5740 		if (sc->sc_type == WM_T_ICH8)
   5741 			reg |= GCR_NO_SNOOP_ALL;
   5742 		else
   5743 			reg &= ~GCR_NO_SNOOP_ALL;
   5744 		CSR_WRITE(sc, WMREG_GCR, reg);
   5745 	}
   5746 	if ((sc->sc_type >= WM_T_ICH8)
   5747 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5748 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5749 
   5750 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5751 		reg |= CTRL_EXT_RO_DIS;
   5752 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5753 	}
   5754 
   5755 	/* Calculate (E)ITR value */
   5756 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5757 		/*
   5758 		 * For NEWQUEUE's EITR (except for 82575).
   5759 		 * The 82575's EITR should be set to the same throttling value
   5760 		 * as other old controllers' ITR because the interrupt/sec
   5761 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
   5762 		 *
   5763 		 * The 82574's EITR should be set to the same value as the ITR.
   5764 		 *
   5765 		 * For N interrupts/sec, set this value to
   5766 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5767 		 */
   5768 		sc->sc_itr_init = 450;
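        		/* e.g. 450 gives 1,000,000 / 450 ~= 2222 interrupts/sec */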
   5769 	} else if (sc->sc_type >= WM_T_82543) {
   5770 		/*
   5771 		 * Set up the interrupt throttling register (units of 256ns)
   5772 		 * Note that a footnote in Intel's documentation says this
   5773 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5774 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5775 		 * that that is also true for the 1024ns units of the other
   5776 		 * interrupt-related timer registers -- so, really, we ought
   5777 		 * to divide this value by 4 when the link speed is low.
   5778 		 *
   5779 		 * XXX implement this division at link speed change!
   5780 		 */
   5781 
   5782 		/*
   5783 		 * For N interrupts/sec, set this value to:
   5784 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5785 		 * absolute and packet timer values to this value
   5786 		 * divided by 4 to get "simple timer" behavior.
   5787 		 */
   5788 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
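        		/* i.e. 1,000,000,000 / (1500 * 256) ~= 2604 */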
   5789 	}
   5790 
   5791 	error = wm_init_txrx_queues(sc);
   5792 	if (error)
   5793 		goto out;
   5794 
   5795 	/*
   5796 	 * Clear out the VLAN table -- we don't use it (yet).
   5797 	 */
   5798 	CSR_WRITE(sc, WMREG_VET, 0);
   5799 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5800 		trynum = 10; /* Due to hw errata */
   5801 	else
   5802 		trynum = 1;
   5803 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5804 		for (j = 0; j < trynum; j++)
   5805 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5806 
   5807 	/*
   5808 	 * Set up flow-control parameters.
   5809 	 *
   5810 	 * XXX Values could probably stand some tuning.
   5811 	 */
   5812 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5813 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5814 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5815 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5816 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5817 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5818 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5819 	}
   5820 
   5821 	sc->sc_fcrtl = FCRTL_DFLT;
   5822 	if (sc->sc_type < WM_T_82543) {
   5823 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5824 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5825 	} else {
   5826 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5827 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5828 	}
   5829 
   5830 	if (sc->sc_type == WM_T_80003)
   5831 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5832 	else
   5833 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5834 
   5835 	/* Writes the control register. */
   5836 	wm_set_vlan(sc);
   5837 
   5838 	if (sc->sc_flags & WM_F_HAS_MII) {
   5839 		uint16_t kmreg;
   5840 
   5841 		switch (sc->sc_type) {
   5842 		case WM_T_80003:
   5843 		case WM_T_ICH8:
   5844 		case WM_T_ICH9:
   5845 		case WM_T_ICH10:
   5846 		case WM_T_PCH:
   5847 		case WM_T_PCH2:
   5848 		case WM_T_PCH_LPT:
   5849 		case WM_T_PCH_SPT:
   5850 		case WM_T_PCH_CNP:
   5851 			/*
   5852 			 * Set the MAC to wait the maximum time between each
   5853 			 * iteration, and increase the max iterations when
   5854 			 * polling the PHY; this fixes erroneous timeouts at
   5855 			 * 10Mbps.
   5856 			 */
   5857 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5858 			    0xFFFF);
   5859 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5860 			    &kmreg);
   5861 			kmreg |= 0x3F;
   5862 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5863 			    kmreg);
   5864 			break;
   5865 		default:
   5866 			break;
   5867 		}
   5868 
   5869 		if (sc->sc_type == WM_T_80003) {
   5870 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5871 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5872 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5873 
   5874 			/* Bypass the RX and TX FIFOs */
   5875 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5876 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5877 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5878 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5879 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5880 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5881 		}
   5882 	}
   5883 #if 0
   5884 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5885 #endif
   5886 
   5887 	/* Set up checksum offload parameters. */
   5888 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5889 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5890 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5891 		reg |= RXCSUM_IPOFL;
   5892 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5893 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5894 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5895 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5896 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5897 
   5898 	/* Set registers about MSI-X */
   5899 	if (wm_is_using_msix(sc)) {
   5900 		uint32_t ivar;
   5901 		struct wm_queue *wmq;
   5902 		int qid, qintr_idx;
   5903 
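        		/*
        		 * Steer each queue's TX/RX interrupt causes and the
        		 * link status cause to their assigned MSI-X vectors:
        		 * via MSIXBM on the 82575, via IVAR elsewhere.
        		 */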
   5904 		if (sc->sc_type == WM_T_82575) {
   5905 			/* Interrupt control */
   5906 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5907 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5908 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5909 
   5910 			/* TX and RX */
   5911 			for (i = 0; i < sc->sc_nqueues; i++) {
   5912 				wmq = &sc->sc_queue[i];
   5913 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5914 				    EITR_TX_QUEUE(wmq->wmq_id)
   5915 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5916 			}
   5917 			/* Link status */
   5918 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5919 			    EITR_OTHER);
   5920 		} else if (sc->sc_type == WM_T_82574) {
   5921 			/* Interrupt control */
   5922 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5923 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5924 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5925 
   5926 			/*
   5927 			 * Work around an issue with spurious interrupts
   5928 			 * in MSI-X mode.
   5929 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
   5930 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5931 			 */
   5932 			reg = CSR_READ(sc, WMREG_RFCTL);
   5933 			reg |= WMREG_RFCTL_ACKDIS;
   5934 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5935 
   5936 			ivar = 0;
   5937 			/* TX and RX */
   5938 			for (i = 0; i < sc->sc_nqueues; i++) {
   5939 				wmq = &sc->sc_queue[i];
   5940 				qid = wmq->wmq_id;
   5941 				qintr_idx = wmq->wmq_intr_idx;
   5942 
   5943 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5944 				    IVAR_TX_MASK_Q_82574(qid));
   5945 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5946 				    IVAR_RX_MASK_Q_82574(qid));
   5947 			}
   5948 			/* Link status */
   5949 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5950 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5951 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5952 		} else {
   5953 			/* Interrupt control */
   5954 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5955 			    | GPIE_EIAME | GPIE_PBA);
   5956 
   5957 			switch (sc->sc_type) {
   5958 			case WM_T_82580:
   5959 			case WM_T_I350:
   5960 			case WM_T_I354:
   5961 			case WM_T_I210:
   5962 			case WM_T_I211:
   5963 				/* TX and RX */
   5964 				for (i = 0; i < sc->sc_nqueues; i++) {
   5965 					wmq = &sc->sc_queue[i];
   5966 					qid = wmq->wmq_id;
   5967 					qintr_idx = wmq->wmq_intr_idx;
   5968 
   5969 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5970 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5971 					ivar |= __SHIFTIN((qintr_idx
   5972 						| IVAR_VALID),
   5973 					    IVAR_TX_MASK_Q(qid));
   5974 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5975 					ivar |= __SHIFTIN((qintr_idx
   5976 						| IVAR_VALID),
   5977 					    IVAR_RX_MASK_Q(qid));
   5978 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5979 				}
   5980 				break;
   5981 			case WM_T_82576:
   5982 				/* TX and RX */
   5983 				for (i = 0; i < sc->sc_nqueues; i++) {
   5984 					wmq = &sc->sc_queue[i];
   5985 					qid = wmq->wmq_id;
   5986 					qintr_idx = wmq->wmq_intr_idx;
   5987 
   5988 					ivar = CSR_READ(sc,
   5989 					    WMREG_IVAR_Q_82576(qid));
   5990 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5991 					ivar |= __SHIFTIN((qintr_idx
   5992 						| IVAR_VALID),
   5993 					    IVAR_TX_MASK_Q_82576(qid));
   5994 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5995 					ivar |= __SHIFTIN((qintr_idx
   5996 						| IVAR_VALID),
   5997 					    IVAR_RX_MASK_Q_82576(qid));
   5998 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5999 					    ivar);
   6000 				}
   6001 				break;
   6002 			default:
   6003 				break;
   6004 			}
   6005 
   6006 			/* Link status */
   6007 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   6008 			    IVAR_MISC_OTHER);
   6009 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6010 		}
   6011 
   6012 		if (wm_is_using_multiqueue(sc)) {
   6013 			wm_init_rss(sc);
   6014 
   6015 			/*
   6016 			 * NOTE: Receive Full-Packet Checksum Offload
   6017 			 * is mutually exclusive with Multiqueue. However,
   6018 			 * this is not the same as TCP/IP checksums, which
   6019 			 * still work.
   6020 			 */
   6021 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6022 			reg |= RXCSUM_PCSD;
   6023 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6024 		}
   6025 	}
   6026 
   6027 	/* Set up the interrupt registers. */
   6028 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6029 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6030 	    ICR_RXO | ICR_RXT0;
   6031 	if (wm_is_using_msix(sc)) {
   6032 		uint32_t mask;
   6033 		struct wm_queue *wmq;
   6034 
   6035 		switch (sc->sc_type) {
   6036 		case WM_T_82574:
   6037 			mask = 0;
   6038 			for (i = 0; i < sc->sc_nqueues; i++) {
   6039 				wmq = &sc->sc_queue[i];
   6040 				mask |= ICR_TXQ(wmq->wmq_id);
   6041 				mask |= ICR_RXQ(wmq->wmq_id);
   6042 			}
   6043 			mask |= ICR_OTHER;
   6044 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6045 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6046 			break;
   6047 		default:
   6048 			if (sc->sc_type == WM_T_82575) {
   6049 				mask = 0;
   6050 				for (i = 0; i < sc->sc_nqueues; i++) {
   6051 					wmq = &sc->sc_queue[i];
   6052 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6053 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6054 				}
   6055 				mask |= EITR_OTHER;
   6056 			} else {
   6057 				mask = 0;
   6058 				for (i = 0; i < sc->sc_nqueues; i++) {
   6059 					wmq = &sc->sc_queue[i];
   6060 					mask |= 1 << wmq->wmq_intr_idx;
   6061 				}
   6062 				mask |= 1 << sc->sc_link_intr_idx;
   6063 			}
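        			/*
        			 * EIAC: auto-clear, EIAM: auto-mask, EIMS:
        			 * unmask the queue and link vectors.
        			 */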
   6064 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6065 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6066 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6067 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6068 			break;
   6069 		}
   6070 	} else
   6071 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6072 
   6073 	/* Set up the inter-packet gap. */
   6074 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6075 
   6076 	if (sc->sc_type >= WM_T_82543) {
   6077 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6078 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6079 			wm_itrs_writereg(sc, wmq);
   6080 		}
   6081 		/*
   6082 		 * Link interrupts occur much less often than TX and RX
   6083 		 * interrupts, so we don't tune the
   6084 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
   6085 		 * if_igb does.
   6086 		 */
   6087 	}
   6088 
   6089 	/* Set the VLAN ethernetype. */
   6090 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6091 
   6092 	/*
   6093 	 * Set up the transmit control register; we start out with
   6094 	 * a collision distance suitable for FDX, but update it when
   6095 	 * we resolve the media type.
   6096 	 */
   6097 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6098 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6099 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6100 	if (sc->sc_type >= WM_T_82571)
   6101 		sc->sc_tctl |= TCTL_MULR;
   6102 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6103 
   6104 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6105 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6106 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6107 	}
   6108 
   6109 	if (sc->sc_type == WM_T_80003) {
   6110 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6111 		reg &= ~TCTL_EXT_GCEX_MASK;
   6112 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6113 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6114 	}
   6115 
   6116 	/* Set the media. */
   6117 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6118 		goto out;
   6119 
   6120 	/* Configure for OS presence */
   6121 	wm_init_manageability(sc);
   6122 
   6123 	/*
   6124 	 * Set up the receive control register; we actually program the
   6125 	 * register when we set the receive filter. Use multicast address
   6126 	 * offset type 0.
   6127 	 *
   6128 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6129 	 * don't enable that feature.
   6130 	 */
   6131 	sc->sc_mchash_type = 0;
   6132 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6133 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6134 
   6135 	/*
   6136 	 * The 82574 uses the one-buffer extended Rx descriptor.
   6137 	 */
   6138 	if (sc->sc_type == WM_T_82574)
   6139 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6140 
   6141 	/*
   6142 	 * The I350 has a bug where it always strips the CRC whether
   6143 	 * asked to or not, so ask for stripped CRC here and cope with it
	 * in rxeof.
   6144 	 */
   6145 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6146 	    || (sc->sc_type == WM_T_I210))
   6147 		sc->sc_rctl |= RCTL_SECRC;
   6148 
   6149 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6150 	    && (ifp->if_mtu > ETHERMTU)) {
   6151 		sc->sc_rctl |= RCTL_LPE;
   6152 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6153 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6154 	}
   6155 
   6156 	if (MCLBYTES == 2048)
   6157 		sc->sc_rctl |= RCTL_2k;
   6158 	else {
   6159 		if (sc->sc_type >= WM_T_82543) {
   6160 			switch (MCLBYTES) {
   6161 			case 4096:
   6162 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6163 				break;
   6164 			case 8192:
   6165 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6166 				break;
   6167 			case 16384:
   6168 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6169 				break;
   6170 			default:
   6171 				panic("wm_init: MCLBYTES %d unsupported",
   6172 				    MCLBYTES);
   6173 				break;
   6174 			}
   6175 		} else
   6176 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6177 	}
   6178 
   6179 	/* Enable ECC */
   6180 	switch (sc->sc_type) {
   6181 	case WM_T_82571:
   6182 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6183 		reg |= PBA_ECC_CORR_EN;
   6184 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6185 		break;
   6186 	case WM_T_PCH_LPT:
   6187 	case WM_T_PCH_SPT:
   6188 	case WM_T_PCH_CNP:
   6189 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6190 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6191 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6192 
   6193 		sc->sc_ctrl |= CTRL_MEHE;
   6194 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6195 		break;
   6196 	default:
   6197 		break;
   6198 	}
   6199 
   6200 	/*
   6201 	 * Set the receive filter.
   6202 	 *
   6203 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6204 	 * the setting of RCTL.EN in wm_set_filter().
   6205 	 */
   6206 	wm_set_filter(sc);
   6207 
   6208 	/* On the 82575 and later, set RDT only if RX is enabled */
   6209 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6210 		int qidx;
   6211 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6212 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6213 			for (i = 0; i < WM_NRXDESC; i++) {
   6214 				mutex_enter(rxq->rxq_lock);
   6215 				wm_init_rxdesc(rxq, i);
   6216 				mutex_exit(rxq->rxq_lock);
   6217 
   6218 			}
   6219 		}
   6220 	}
   6221 
   6222 	wm_unset_stopping_flags(sc);
   6223 
   6224 	/* Start the one second link check clock. */
   6225 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6226 
   6227 	/* ...all done! */
   6228 	ifp->if_flags |= IFF_RUNNING;
   6229 	ifp->if_flags &= ~IFF_OACTIVE;
   6230 
   6231  out:
   6232 	/* Save last flags for the callback */
   6233 	sc->sc_if_flags = ifp->if_flags;
   6234 	sc->sc_ec_capenable = ec->ec_capenable;
   6235 	if (error)
   6236 		log(LOG_ERR, "%s: interface not running\n",
   6237 		    device_xname(sc->sc_dev));
   6238 	return error;
   6239 }
   6240 
   6241 /*
   6242  * wm_stop:		[ifnet interface function]
   6243  *
   6244  *	Stop transmission on the interface.
   6245  */
   6246 static void
   6247 wm_stop(struct ifnet *ifp, int disable)
   6248 {
   6249 	struct wm_softc *sc = ifp->if_softc;
   6250 
   6251 	WM_CORE_LOCK(sc);
   6252 	wm_stop_locked(ifp, disable);
   6253 	WM_CORE_UNLOCK(sc);
   6254 }
   6255 
   6256 static void
   6257 wm_stop_locked(struct ifnet *ifp, int disable)
   6258 {
   6259 	struct wm_softc *sc = ifp->if_softc;
   6260 	struct wm_txsoft *txs;
   6261 	int i, qidx;
   6262 
   6263 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6264 		device_xname(sc->sc_dev), __func__));
   6265 	KASSERT(WM_CORE_LOCKED(sc));
   6266 
   6267 	wm_set_stopping_flags(sc);
   6268 
   6269 	/* Stop the one second clock. */
   6270 	callout_stop(&sc->sc_tick_ch);
   6271 
   6272 	/* Stop the 82547 Tx FIFO stall check timer. */
   6273 	if (sc->sc_type == WM_T_82547)
   6274 		callout_stop(&sc->sc_txfifo_ch);
   6275 
   6276 	if (sc->sc_flags & WM_F_HAS_MII) {
   6277 		/* Down the MII. */
   6278 		mii_down(&sc->sc_mii);
   6279 	} else {
   6280 #if 0
   6281 		/* Should we clear PHY's status properly? */
   6282 		wm_reset(sc);
   6283 #endif
   6284 	}
   6285 
   6286 	/* Stop the transmit and receive processes. */
   6287 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6288 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6289 	sc->sc_rctl &= ~RCTL_EN;
   6290 
   6291 	/*
   6292 	 * Clear the interrupt mask to ensure the device cannot assert its
   6293 	 * interrupt line.
   6294 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6295 	 * service any currently pending or shared interrupt.
   6296 	 */
   6297 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6298 	sc->sc_icr = 0;
   6299 	if (wm_is_using_msix(sc)) {
   6300 		if (sc->sc_type != WM_T_82574) {
   6301 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6302 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6303 		} else
   6304 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6305 	}
   6306 
   6307 	/* Release any queued transmit buffers. */
   6308 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6309 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6310 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6311 		mutex_enter(txq->txq_lock);
   6312 		txq->txq_sending = false; /* ensure watchdog disabled */
   6313 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6314 			txs = &txq->txq_soft[i];
   6315 			if (txs->txs_mbuf != NULL) {
    6316 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6317 				m_freem(txs->txs_mbuf);
   6318 				txs->txs_mbuf = NULL;
   6319 			}
   6320 		}
   6321 		mutex_exit(txq->txq_lock);
   6322 	}
   6323 
   6324 	/* Mark the interface as down and cancel the watchdog timer. */
   6325 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6326 
   6327 	if (disable) {
   6328 		for (i = 0; i < sc->sc_nqueues; i++) {
   6329 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6330 			mutex_enter(rxq->rxq_lock);
   6331 			wm_rxdrain(rxq);
   6332 			mutex_exit(rxq->rxq_lock);
   6333 		}
   6334 	}
   6335 
   6336 #if 0 /* notyet */
   6337 	if (sc->sc_type >= WM_T_82544)
   6338 		CSR_WRITE(sc, WMREG_WUC, 0);
   6339 #endif
   6340 }
   6341 
   6342 static void
   6343 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6344 {
   6345 	struct mbuf *m;
   6346 	int i;
   6347 
   6348 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6349 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6350 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6351 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6352 		    m->m_data, m->m_len, m->m_flags);
   6353 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6354 	    i, i == 1 ? "" : "s");
   6355 }
   6356 
   6357 /*
   6358  * wm_82547_txfifo_stall:
   6359  *
   6360  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6361  *	reset the FIFO pointers, and restart packet transmission.
   6362  */
   6363 static void
   6364 wm_82547_txfifo_stall(void *arg)
   6365 {
   6366 	struct wm_softc *sc = arg;
   6367 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6368 
   6369 	mutex_enter(txq->txq_lock);
   6370 
   6371 	if (txq->txq_stopping)
   6372 		goto out;
   6373 
   6374 	if (txq->txq_fifo_stall) {
   6375 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6376 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6377 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6378 			/*
   6379 			 * Packets have drained.  Stop transmitter, reset
   6380 			 * FIFO pointers, restart transmitter, and kick
   6381 			 * the packet queue.
   6382 			 */
   6383 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6384 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6385 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6386 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6387 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6388 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6389 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6390 			CSR_WRITE_FLUSH(sc);
   6391 
   6392 			txq->txq_fifo_head = 0;
   6393 			txq->txq_fifo_stall = 0;
   6394 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6395 		} else {
   6396 			/*
   6397 			 * Still waiting for packets to drain; try again in
   6398 			 * another tick.
   6399 			 */
   6400 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6401 		}
   6402 	}
   6403 
   6404 out:
   6405 	mutex_exit(txq->txq_lock);
   6406 }
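         /*
          * Note on the drain test above: the FIFO is treated as drained only
          * when the descriptor ring is empty (TDT == TDH) and the on-chip
          * FIFO head/tail and saved head/tail pointers agree (TDFT == TDFH,
          * TDFTS == TDFHS); only then is it safe to rewind all four pointers
          * to txq_fifo_addr and re-enable the transmitter.
          */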
   6407 
   6408 /*
   6409  * wm_82547_txfifo_bugchk:
   6410  *
   6411  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6412  *	prevent enqueueing a packet that would wrap around the end
    6413 	 *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6414  *
   6415  *	We do this by checking the amount of space before the end
   6416  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6417  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6418  *	the internal FIFO pointers to the beginning, and restart
   6419  *	transmission on the interface.
   6420  */
   6421 #define	WM_FIFO_HDR		0x10
   6422 #define	WM_82547_PAD_LEN	0x3e0
   6423 static int
   6424 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6425 {
   6426 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6427 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6428 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6429 
   6430 	/* Just return if already stalled. */
   6431 	if (txq->txq_fifo_stall)
   6432 		return 1;
   6433 
   6434 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6435 		/* Stall only occurs in half-duplex mode. */
   6436 		goto send_packet;
   6437 	}
   6438 
   6439 	if (len >= WM_82547_PAD_LEN + space) {
   6440 		txq->txq_fifo_stall = 1;
   6441 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6442 		return 1;
   6443 	}
   6444 
   6445  send_packet:
   6446 	txq->txq_fifo_head += len;
   6447 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6448 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6449 
   6450 	return 0;
   6451 }
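         /*
          * A worked example of the check above, assuming a hypothetical FIFO
          * split where txq_fifo_size is 0x5f00 and txq_fifo_head is 0x5e00:
          * space is then 0x100, and a 1514-byte frame needs
          * len = roundup(1514 + 0x10, 0x10) = 0x600 bytes.  Since
          * 0x600 >= WM_82547_PAD_LEN (0x3e0) + 0x100, the queue stalls until
          * wm_82547_txfifo_stall() resets the FIFO pointers.
          */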
   6452 
   6453 static int
   6454 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6455 {
   6456 	int error;
   6457 
   6458 	/*
   6459 	 * Allocate the control data structures, and create and load the
   6460 	 * DMA map for it.
   6461 	 *
   6462 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6463 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6464 	 * both sets within the same 4G segment.
   6465 	 */
   6466 	if (sc->sc_type < WM_T_82544)
   6467 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6468 	else
   6469 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6470 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6471 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6472 	else
   6473 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6474 
   6475 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6476 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6477 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6478 		aprint_error_dev(sc->sc_dev,
   6479 		    "unable to allocate TX control data, error = %d\n",
   6480 		    error);
   6481 		goto fail_0;
   6482 	}
   6483 
   6484 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6485 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6486 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6487 		aprint_error_dev(sc->sc_dev,
   6488 		    "unable to map TX control data, error = %d\n", error);
   6489 		goto fail_1;
   6490 	}
   6491 
   6492 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6493 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6494 		aprint_error_dev(sc->sc_dev,
   6495 		    "unable to create TX control data DMA map, error = %d\n",
   6496 		    error);
   6497 		goto fail_2;
   6498 	}
   6499 
   6500 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6501 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6502 		aprint_error_dev(sc->sc_dev,
   6503 		    "unable to load TX control data DMA map, error = %d\n",
   6504 		    error);
   6505 		goto fail_3;
   6506 	}
   6507 
   6508 	return 0;
   6509 
   6510  fail_3:
   6511 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6512  fail_2:
   6513 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6514 	    WM_TXDESCS_SIZE(txq));
   6515  fail_1:
   6516 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6517  fail_0:
   6518 	return error;
   6519 }
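         /*
          * Note: the (bus_size_t)0x100000000ULL passed to bus_dmamem_alloc()
          * above is the "boundary" argument, which guarantees the allocated
          * memory does not cross a 4GB-aligned address.  Together with the
          * identical constraint in wm_alloc_rx_descs(), this satisfies the
          * same-4G-segment requirement described in the NOTE above; e.g. a
          * ring that would span 0xfffff000 through 0x100000000 is placed
          * elsewhere instead.
          */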
   6520 
   6521 static void
   6522 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6523 {
   6524 
   6525 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6526 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6527 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6528 	    WM_TXDESCS_SIZE(txq));
   6529 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6530 }
   6531 
   6532 static int
   6533 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6534 {
   6535 	int error;
   6536 	size_t rxq_descs_size;
   6537 
   6538 	/*
   6539 	 * Allocate the control data structures, and create and load the
   6540 	 * DMA map for it.
   6541 	 *
   6542 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6543 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6544 	 * both sets within the same 4G segment.
   6545 	 */
   6546 	rxq->rxq_ndesc = WM_NRXDESC;
   6547 	if (sc->sc_type == WM_T_82574)
   6548 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6549 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6550 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6551 	else
   6552 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6553 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6554 
   6555 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6556 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6557 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6558 		aprint_error_dev(sc->sc_dev,
   6559 		    "unable to allocate RX control data, error = %d\n",
   6560 		    error);
   6561 		goto fail_0;
   6562 	}
   6563 
   6564 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6565 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6566 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6567 		aprint_error_dev(sc->sc_dev,
   6568 		    "unable to map RX control data, error = %d\n", error);
   6569 		goto fail_1;
   6570 	}
   6571 
   6572 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6573 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6574 		aprint_error_dev(sc->sc_dev,
   6575 		    "unable to create RX control data DMA map, error = %d\n",
   6576 		    error);
   6577 		goto fail_2;
   6578 	}
   6579 
   6580 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6581 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6582 		aprint_error_dev(sc->sc_dev,
   6583 		    "unable to load RX control data DMA map, error = %d\n",
   6584 		    error);
   6585 		goto fail_3;
   6586 	}
   6587 
   6588 	return 0;
   6589 
   6590  fail_3:
   6591 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6592  fail_2:
   6593 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6594 	    rxq_descs_size);
   6595  fail_1:
   6596 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6597  fail_0:
   6598 	return error;
   6599 }
   6600 
   6601 static void
   6602 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6603 {
   6604 
   6605 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6606 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6607 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6608 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6609 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6610 }
   6611 
   6612 
   6613 static int
   6614 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6615 {
   6616 	int i, error;
   6617 
   6618 	/* Create the transmit buffer DMA maps. */
   6619 	WM_TXQUEUELEN(txq) =
   6620 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6621 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6622 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6623 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6624 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6625 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6626 			aprint_error_dev(sc->sc_dev,
   6627 			    "unable to create Tx DMA map %d, error = %d\n",
   6628 			    i, error);
   6629 			goto fail;
   6630 		}
   6631 	}
   6632 
   6633 	return 0;
   6634 
   6635  fail:
   6636 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6637 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6638 			bus_dmamap_destroy(sc->sc_dmat,
   6639 			    txq->txq_soft[i].txs_dmamap);
   6640 	}
   6641 	return error;
   6642 }
   6643 
   6644 static void
   6645 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6646 {
   6647 	int i;
   6648 
   6649 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6650 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6651 			bus_dmamap_destroy(sc->sc_dmat,
   6652 			    txq->txq_soft[i].txs_dmamap);
   6653 	}
   6654 }
   6655 
   6656 static int
   6657 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6658 {
   6659 	int i, error;
   6660 
   6661 	/* Create the receive buffer DMA maps. */
   6662 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6663 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6664 			    MCLBYTES, 0, 0,
   6665 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6666 			aprint_error_dev(sc->sc_dev,
    6667 			    "unable to create Rx DMA map %d, error = %d\n",
   6668 			    i, error);
   6669 			goto fail;
   6670 		}
   6671 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6672 	}
   6673 
   6674 	return 0;
   6675 
   6676  fail:
   6677 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6678 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6679 			bus_dmamap_destroy(sc->sc_dmat,
   6680 			    rxq->rxq_soft[i].rxs_dmamap);
   6681 	}
   6682 	return error;
   6683 }
   6684 
   6685 static void
   6686 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6687 {
   6688 	int i;
   6689 
   6690 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6691 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6692 			bus_dmamap_destroy(sc->sc_dmat,
   6693 			    rxq->rxq_soft[i].rxs_dmamap);
   6694 	}
   6695 }
   6696 
   6697 /*
    6698  * wm_alloc_txrx_queues:
    6699  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6700  */
   6701 static int
   6702 wm_alloc_txrx_queues(struct wm_softc *sc)
   6703 {
   6704 	int i, error, tx_done, rx_done;
   6705 
   6706 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6707 	    KM_SLEEP);
   6708 	if (sc->sc_queue == NULL) {
    6709 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6710 		error = ENOMEM;
   6711 		goto fail_0;
   6712 	}
   6713 
   6714 	/*
   6715 	 * For transmission
   6716 	 */
   6717 	error = 0;
   6718 	tx_done = 0;
   6719 	for (i = 0; i < sc->sc_nqueues; i++) {
   6720 #ifdef WM_EVENT_COUNTERS
   6721 		int j;
   6722 		const char *xname;
   6723 #endif
   6724 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6725 		txq->txq_sc = sc;
   6726 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6727 
   6728 		error = wm_alloc_tx_descs(sc, txq);
   6729 		if (error)
   6730 			break;
   6731 		error = wm_alloc_tx_buffer(sc, txq);
   6732 		if (error) {
   6733 			wm_free_tx_descs(sc, txq);
   6734 			break;
   6735 		}
   6736 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6737 		if (txq->txq_interq == NULL) {
   6738 			wm_free_tx_descs(sc, txq);
   6739 			wm_free_tx_buffer(sc, txq);
   6740 			error = ENOMEM;
   6741 			break;
   6742 		}
   6743 
   6744 #ifdef WM_EVENT_COUNTERS
   6745 		xname = device_xname(sc->sc_dev);
   6746 
   6747 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6748 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6749 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6750 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6751 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6752 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6753 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6754 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6755 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6756 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6757 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6758 
   6759 		for (j = 0; j < WM_NTXSEGS; j++) {
   6760 			snprintf(txq->txq_txseg_evcnt_names[j],
   6761 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6762 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6763 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6764 		}
   6765 
   6766 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6767 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6768 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6769 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6770 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6771 #endif /* WM_EVENT_COUNTERS */
   6772 
   6773 		tx_done++;
   6774 	}
   6775 	if (error)
   6776 		goto fail_1;
   6777 
   6778 	/*
    6779 	 * For receive
   6780 	 */
   6781 	error = 0;
   6782 	rx_done = 0;
   6783 	for (i = 0; i < sc->sc_nqueues; i++) {
   6784 #ifdef WM_EVENT_COUNTERS
   6785 		const char *xname;
   6786 #endif
   6787 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6788 		rxq->rxq_sc = sc;
   6789 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6790 
   6791 		error = wm_alloc_rx_descs(sc, rxq);
   6792 		if (error)
   6793 			break;
   6794 
   6795 		error = wm_alloc_rx_buffer(sc, rxq);
   6796 		if (error) {
   6797 			wm_free_rx_descs(sc, rxq);
   6798 			break;
   6799 		}
   6800 
   6801 #ifdef WM_EVENT_COUNTERS
   6802 		xname = device_xname(sc->sc_dev);
   6803 
   6804 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6805 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6806 
   6807 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6808 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6809 #endif /* WM_EVENT_COUNTERS */
   6810 
   6811 		rx_done++;
   6812 	}
   6813 	if (error)
   6814 		goto fail_2;
   6815 
   6816 	return 0;
   6817 
   6818  fail_2:
   6819 	for (i = 0; i < rx_done; i++) {
   6820 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6821 		wm_free_rx_buffer(sc, rxq);
   6822 		wm_free_rx_descs(sc, rxq);
   6823 		if (rxq->rxq_lock)
   6824 			mutex_obj_free(rxq->rxq_lock);
   6825 	}
   6826  fail_1:
   6827 	for (i = 0; i < tx_done; i++) {
   6828 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6829 		pcq_destroy(txq->txq_interq);
   6830 		wm_free_tx_buffer(sc, txq);
   6831 		wm_free_tx_descs(sc, txq);
   6832 		if (txq->txq_lock)
   6833 			mutex_obj_free(txq->txq_lock);
   6834 	}
   6835 
   6836 	kmem_free(sc->sc_queue,
   6837 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6838  fail_0:
   6839 	return error;
   6840 }
   6841 
   6842 /*
    6843  * wm_free_txrx_queues:
    6844  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6845  */
   6846 static void
   6847 wm_free_txrx_queues(struct wm_softc *sc)
   6848 {
   6849 	int i;
   6850 
   6851 	for (i = 0; i < sc->sc_nqueues; i++) {
   6852 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6853 
   6854 #ifdef WM_EVENT_COUNTERS
   6855 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6856 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6857 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6858 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6859 #endif /* WM_EVENT_COUNTERS */
   6860 
   6861 		wm_free_rx_buffer(sc, rxq);
   6862 		wm_free_rx_descs(sc, rxq);
   6863 		if (rxq->rxq_lock)
   6864 			mutex_obj_free(rxq->rxq_lock);
   6865 	}
   6866 
   6867 	for (i = 0; i < sc->sc_nqueues; i++) {
   6868 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6869 		struct mbuf *m;
   6870 #ifdef WM_EVENT_COUNTERS
   6871 		int j;
   6872 
   6873 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6874 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6875 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6876 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6877 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6878 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6879 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6880 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6881 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6882 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6883 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6884 
   6885 		for (j = 0; j < WM_NTXSEGS; j++)
   6886 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6887 
   6888 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6889 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6890 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6891 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6892 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6893 #endif /* WM_EVENT_COUNTERS */
   6894 
   6895 		/* drain txq_interq */
   6896 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6897 			m_freem(m);
   6898 		pcq_destroy(txq->txq_interq);
   6899 
   6900 		wm_free_tx_buffer(sc, txq);
   6901 		wm_free_tx_descs(sc, txq);
   6902 		if (txq->txq_lock)
   6903 			mutex_obj_free(txq->txq_lock);
   6904 	}
   6905 
   6906 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6907 }
   6908 
   6909 static void
   6910 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6911 {
   6912 
   6913 	KASSERT(mutex_owned(txq->txq_lock));
   6914 
   6915 	/* Initialize the transmit descriptor ring. */
   6916 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6917 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6918 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6919 	txq->txq_free = WM_NTXDESC(txq);
   6920 	txq->txq_next = 0;
   6921 }
   6922 
   6923 static void
   6924 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6925     struct wm_txqueue *txq)
   6926 {
   6927 
   6928 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6929 		device_xname(sc->sc_dev), __func__));
   6930 	KASSERT(mutex_owned(txq->txq_lock));
   6931 
   6932 	if (sc->sc_type < WM_T_82543) {
   6933 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6934 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6935 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6936 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6937 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6938 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6939 	} else {
   6940 		int qid = wmq->wmq_id;
   6941 
   6942 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6943 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6944 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6945 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6946 
   6947 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6948 			/*
   6949 			 * Don't write TDT before TCTL.EN is set.
   6950 			 * See the document.
    6951 			 * See the datasheet.
   6952 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6953 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6954 			    | TXDCTL_WTHRESH(0));
   6955 		else {
   6956 			/* XXX should update with AIM? */
   6957 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6958 			if (sc->sc_type >= WM_T_82540) {
   6959 				/* should be same */
   6960 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6961 			}
   6962 
   6963 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6964 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6965 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6966 		}
   6967 	}
   6968 }
   6969 
   6970 static void
   6971 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6972 {
   6973 	int i;
   6974 
   6975 	KASSERT(mutex_owned(txq->txq_lock));
   6976 
   6977 	/* Initialize the transmit job descriptors. */
   6978 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6979 		txq->txq_soft[i].txs_mbuf = NULL;
   6980 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6981 	txq->txq_snext = 0;
   6982 	txq->txq_sdirty = 0;
   6983 }
   6984 
   6985 static void
   6986 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6987     struct wm_txqueue *txq)
   6988 {
   6989 
   6990 	KASSERT(mutex_owned(txq->txq_lock));
   6991 
   6992 	/*
   6993 	 * Set up some register offsets that are different between
   6994 	 * the i82542 and the i82543 and later chips.
   6995 	 */
   6996 	if (sc->sc_type < WM_T_82543)
   6997 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6998 	else
   6999 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   7000 
   7001 	wm_init_tx_descs(sc, txq);
   7002 	wm_init_tx_regs(sc, wmq, txq);
   7003 	wm_init_tx_buffer(sc, txq);
   7004 
   7005 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   7006 	txq->txq_sending = false;
   7007 }
   7008 
   7009 static void
   7010 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7011     struct wm_rxqueue *rxq)
   7012 {
   7013 
   7014 	KASSERT(mutex_owned(rxq->rxq_lock));
   7015 
   7016 	/*
   7017 	 * Initialize the receive descriptor and receive job
   7018 	 * descriptor rings.
   7019 	 */
   7020 	if (sc->sc_type < WM_T_82543) {
   7021 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7022 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7023 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7024 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7025 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7026 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7027 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7028 
   7029 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7030 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7031 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7032 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7033 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7034 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7035 	} else {
   7036 		int qid = wmq->wmq_id;
   7037 
   7038 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7039 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7040 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7041 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7042 
   7043 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7044 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7045 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7046 
    7047 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7048 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7049 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7050 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7051 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7052 			    | RXDCTL_WTHRESH(1));
   7053 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7054 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7055 		} else {
   7056 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7057 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7058 			/* XXX should update with AIM? */
   7059 			CSR_WRITE(sc, WMREG_RDTR,
   7060 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7061 			/* MUST be same */
   7062 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7063 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7064 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7065 		}
   7066 	}
   7067 }
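         /*
          * Note on the SRRCTL programming above: the BSIZEPKT field is
          * expressed in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes (1KB
          * units if the shift is 10, as on the 82575 family), which is why
          * MCLBYTES must be a multiple of that unit; e.g. MCLBYTES == 2048
          * programs the field with the value 2.
          */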
   7068 
   7069 static int
   7070 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7071 {
   7072 	struct wm_rxsoft *rxs;
   7073 	int error, i;
   7074 
   7075 	KASSERT(mutex_owned(rxq->rxq_lock));
   7076 
   7077 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7078 		rxs = &rxq->rxq_soft[i];
   7079 		if (rxs->rxs_mbuf == NULL) {
   7080 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7081 				log(LOG_ERR, "%s: unable to allocate or map "
   7082 				    "rx buffer %d, error = %d\n",
   7083 				    device_xname(sc->sc_dev), i, error);
   7084 				/*
   7085 				 * XXX Should attempt to run with fewer receive
   7086 				 * XXX buffers instead of just failing.
   7087 				 */
   7088 				wm_rxdrain(rxq);
   7089 				return ENOMEM;
   7090 			}
   7091 		} else {
   7092 			/*
   7093 			 * For 82575 and 82576, the RX descriptors must be
   7094 			 * initialized after the setting of RCTL.EN in
   7095 			 * wm_set_filter()
   7096 			 */
   7097 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7098 				wm_init_rxdesc(rxq, i);
   7099 		}
   7100 	}
   7101 	rxq->rxq_ptr = 0;
   7102 	rxq->rxq_discard = 0;
   7103 	WM_RXCHAIN_RESET(rxq);
   7104 
   7105 	return 0;
   7106 }
   7107 
   7108 static int
   7109 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7110     struct wm_rxqueue *rxq)
   7111 {
   7112 
   7113 	KASSERT(mutex_owned(rxq->rxq_lock));
   7114 
   7115 	/*
   7116 	 * Set up some register offsets that are different between
   7117 	 * the i82542 and the i82543 and later chips.
   7118 	 */
   7119 	if (sc->sc_type < WM_T_82543)
   7120 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7121 	else
   7122 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7123 
   7124 	wm_init_rx_regs(sc, wmq, rxq);
   7125 	return wm_init_rx_buffer(sc, rxq);
   7126 }
   7127 
   7128 /*
    7129  * wm_init_txrx_queues:
    7130  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   7131  */
   7132 static int
   7133 wm_init_txrx_queues(struct wm_softc *sc)
   7134 {
   7135 	int i, error = 0;
   7136 
   7137 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7138 		device_xname(sc->sc_dev), __func__));
   7139 
   7140 	for (i = 0; i < sc->sc_nqueues; i++) {
   7141 		struct wm_queue *wmq = &sc->sc_queue[i];
   7142 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7143 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7144 
   7145 		/*
   7146 		 * TODO
    7147 		 * Currently, a constant value is used instead of AIM (adaptive
    7148 		 * interrupt moderation).  Furthermore, the interrupt interval
    7149 		 * used for multiqueue (in polling mode) is less than the default.
   7150 		 * More tuning and AIM are required.
   7151 		 */
   7152 		if (wm_is_using_multiqueue(sc))
   7153 			wmq->wmq_itr = 50;
   7154 		else
   7155 			wmq->wmq_itr = sc->sc_itr_init;
   7156 		wmq->wmq_set_itr = true;
   7157 
   7158 		mutex_enter(txq->txq_lock);
   7159 		wm_init_tx_queue(sc, wmq, txq);
   7160 		mutex_exit(txq->txq_lock);
   7161 
   7162 		mutex_enter(rxq->rxq_lock);
   7163 		error = wm_init_rx_queue(sc, wmq, rxq);
   7164 		mutex_exit(rxq->rxq_lock);
   7165 		if (error)
   7166 			break;
   7167 	}
   7168 
   7169 	return error;
   7170 }
   7171 
   7172 /*
   7173  * wm_tx_offload:
   7174  *
   7175  *	Set up TCP/IP checksumming parameters for the
   7176  *	specified packet.
   7177  */
   7178 static int
   7179 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7180     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7181 {
   7182 	struct mbuf *m0 = txs->txs_mbuf;
   7183 	struct livengood_tcpip_ctxdesc *t;
   7184 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7185 	uint32_t ipcse;
   7186 	struct ether_header *eh;
   7187 	int offset, iphl;
   7188 	uint8_t fields;
   7189 
   7190 	/*
   7191 	 * XXX It would be nice if the mbuf pkthdr had offset
   7192 	 * fields for the protocol headers.
   7193 	 */
   7194 
   7195 	eh = mtod(m0, struct ether_header *);
   7196 	switch (htons(eh->ether_type)) {
   7197 	case ETHERTYPE_IP:
   7198 	case ETHERTYPE_IPV6:
   7199 		offset = ETHER_HDR_LEN;
   7200 		break;
   7201 
   7202 	case ETHERTYPE_VLAN:
   7203 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7204 		break;
   7205 
   7206 	default:
   7207 		/*
   7208 		 * Don't support this protocol or encapsulation.
   7209 		 */
   7210 		*fieldsp = 0;
   7211 		*cmdp = 0;
   7212 		return 0;
   7213 	}
   7214 
   7215 	if ((m0->m_pkthdr.csum_flags &
   7216 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7217 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7218 	} else
   7219 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7220 
   7221 	ipcse = offset + iphl - 1;
   7222 
   7223 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7224 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7225 	seg = 0;
   7226 	fields = 0;
   7227 
   7228 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7229 		int hlen = offset + iphl;
   7230 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7231 
   7232 		if (__predict_false(m0->m_len <
   7233 				    (hlen + sizeof(struct tcphdr)))) {
   7234 			/*
   7235 			 * TCP/IP headers are not in the first mbuf; we need
   7236 			 * to do this the slow and painful way. Let's just
   7237 			 * hope this doesn't happen very often.
   7238 			 */
   7239 			struct tcphdr th;
   7240 
   7241 			WM_Q_EVCNT_INCR(txq, tsopain);
   7242 
   7243 			m_copydata(m0, hlen, sizeof(th), &th);
   7244 			if (v4) {
   7245 				struct ip ip;
   7246 
   7247 				m_copydata(m0, offset, sizeof(ip), &ip);
   7248 				ip.ip_len = 0;
   7249 				m_copyback(m0,
   7250 				    offset + offsetof(struct ip, ip_len),
   7251 				    sizeof(ip.ip_len), &ip.ip_len);
   7252 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7253 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7254 			} else {
   7255 				struct ip6_hdr ip6;
   7256 
   7257 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7258 				ip6.ip6_plen = 0;
   7259 				m_copyback(m0,
   7260 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7261 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7262 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7263 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7264 			}
   7265 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7266 			    sizeof(th.th_sum), &th.th_sum);
   7267 
   7268 			hlen += th.th_off << 2;
   7269 		} else {
   7270 			/*
   7271 			 * TCP/IP headers are in the first mbuf; we can do
   7272 			 * this the easy way.
   7273 			 */
   7274 			struct tcphdr *th;
   7275 
   7276 			if (v4) {
   7277 				struct ip *ip =
   7278 				    (void *)(mtod(m0, char *) + offset);
   7279 				th = (void *)(mtod(m0, char *) + hlen);
   7280 
   7281 				ip->ip_len = 0;
   7282 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7283 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7284 			} else {
   7285 				struct ip6_hdr *ip6 =
   7286 				    (void *)(mtod(m0, char *) + offset);
   7287 				th = (void *)(mtod(m0, char *) + hlen);
   7288 
   7289 				ip6->ip6_plen = 0;
   7290 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7291 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7292 			}
   7293 			hlen += th->th_off << 2;
   7294 		}
   7295 
   7296 		if (v4) {
   7297 			WM_Q_EVCNT_INCR(txq, tso);
   7298 			cmdlen |= WTX_TCPIP_CMD_IP;
   7299 		} else {
   7300 			WM_Q_EVCNT_INCR(txq, tso6);
   7301 			ipcse = 0;
   7302 		}
   7303 		cmd |= WTX_TCPIP_CMD_TSE;
   7304 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7305 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7306 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7307 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7308 	}
   7309 
   7310 	/*
   7311 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7312 	 * offload feature, if we load the context descriptor, we
   7313 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7314 	 */
   7315 
   7316 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7317 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7318 	    WTX_TCPIP_IPCSE(ipcse);
   7319 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7320 		WM_Q_EVCNT_INCR(txq, ipsum);
   7321 		fields |= WTX_IXSM;
   7322 	}
   7323 
   7324 	offset += iphl;
   7325 
   7326 	if (m0->m_pkthdr.csum_flags &
   7327 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7328 		WM_Q_EVCNT_INCR(txq, tusum);
   7329 		fields |= WTX_TXSM;
   7330 		tucs = WTX_TCPIP_TUCSS(offset) |
   7331 		    WTX_TCPIP_TUCSO(offset +
   7332 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7333 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7334 	} else if ((m0->m_pkthdr.csum_flags &
   7335 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7336 		WM_Q_EVCNT_INCR(txq, tusum6);
   7337 		fields |= WTX_TXSM;
   7338 		tucs = WTX_TCPIP_TUCSS(offset) |
   7339 		    WTX_TCPIP_TUCSO(offset +
   7340 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7341 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7342 	} else {
   7343 		/* Just initialize it to a valid TCP context. */
   7344 		tucs = WTX_TCPIP_TUCSS(offset) |
   7345 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7346 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7347 	}
   7348 
   7349 	/*
    7350 	 * We don't have to write a context descriptor for every packet,
    7351 	 * except on the 82574: there, a context descriptor must be written
    7352 	 * for every packet when two descriptor queues are in use.
    7353 	 * Writing a context descriptor for every packet adds overhead,
    7354 	 * but it does not cause problems.
   7355 	 */
   7356 	/* Fill in the context descriptor. */
   7357 	t = (struct livengood_tcpip_ctxdesc *)
   7358 	    &txq->txq_descs[txq->txq_next];
   7359 	t->tcpip_ipcs = htole32(ipcs);
   7360 	t->tcpip_tucs = htole32(tucs);
   7361 	t->tcpip_cmdlen = htole32(cmdlen);
   7362 	t->tcpip_seg = htole32(seg);
   7363 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7364 
   7365 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7366 	txs->txs_ndesc++;
   7367 
   7368 	*cmdp = cmd;
   7369 	*fieldsp = fields;
   7370 
   7371 	return 0;
   7372 }
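         /*
          * A worked example of the context fields above, for an untagged
          * IPv4/TCP packet with a 20-byte IP header and no TSO: offset = 14
          * (the Ethernet header), iphl = 20, so IPCSS = 14, IPCSO = 14 + 10
          * = 24 (the ip_sum field) and IPCSE = 33 (the last byte of the IP
          * header); for the TCP layer, TUCSS = 34, TUCSO = 34 + 16 = 50 (the
          * th_sum field), and TUCSE = 0, meaning "checksum through the end
          * of the packet".
          */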
   7373 
   7374 static inline int
   7375 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7376 {
   7377 	struct wm_softc *sc = ifp->if_softc;
   7378 	u_int cpuid = cpu_index(curcpu());
   7379 
   7380 	/*
    7381 	 * Currently, a simple CPU-based distribution strategy.
    7382 	 * TODO:
    7383 	 * Distribute by flowid (i.e. the RSS hash value).
   7384 	 */
   7385 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7386 }
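         /*
          * For example, with ncpu = 8, sc_nqueues = 4 and
          * sc_affinity_offset = 2, a thread running on CPU 5 maps to
          * ((5 + 8 - 2) % 8) % 4 = 3, so CPUs counted from the affinity
          * offset are spread round-robin across the Tx queues.
          */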
   7387 
   7388 /*
   7389  * wm_start:		[ifnet interface function]
   7390  *
   7391  *	Start packet transmission on the interface.
   7392  */
   7393 static void
   7394 wm_start(struct ifnet *ifp)
   7395 {
   7396 	struct wm_softc *sc = ifp->if_softc;
   7397 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7398 
   7399 #ifdef WM_MPSAFE
   7400 	KASSERT(if_is_mpsafe(ifp));
   7401 #endif
   7402 	/*
   7403 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7404 	 */
   7405 
   7406 	mutex_enter(txq->txq_lock);
   7407 	if (!txq->txq_stopping)
   7408 		wm_start_locked(ifp);
   7409 	mutex_exit(txq->txq_lock);
   7410 }
   7411 
   7412 static void
   7413 wm_start_locked(struct ifnet *ifp)
   7414 {
   7415 	struct wm_softc *sc = ifp->if_softc;
   7416 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7417 
   7418 	wm_send_common_locked(ifp, txq, false);
   7419 }
   7420 
   7421 static int
   7422 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7423 {
   7424 	int qid;
   7425 	struct wm_softc *sc = ifp->if_softc;
   7426 	struct wm_txqueue *txq;
   7427 
   7428 	qid = wm_select_txqueue(ifp, m);
   7429 	txq = &sc->sc_queue[qid].wmq_txq;
   7430 
   7431 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7432 		m_freem(m);
   7433 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7434 		return ENOBUFS;
   7435 	}
   7436 
   7437 	/*
   7438 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7439 	 */
   7440 	ifp->if_obytes += m->m_pkthdr.len;
   7441 	if (m->m_flags & M_MCAST)
   7442 		ifp->if_omcasts++;
   7443 
   7444 	if (mutex_tryenter(txq->txq_lock)) {
   7445 		if (!txq->txq_stopping)
   7446 			wm_transmit_locked(ifp, txq);
   7447 		mutex_exit(txq->txq_lock);
   7448 	}
   7449 
   7450 	return 0;
   7451 }
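         /*
          * Note: if mutex_tryenter() fails above, some other context holds
          * txq_lock and the packet stays queued in txq_interq; it is not
          * lost, and is expected to be drained by a later
          * wm_transmit_locked() call or by the Tx interrupt/softint path
          * for this queue.
          */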
   7452 
   7453 static void
   7454 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7455 {
   7456 
   7457 	wm_send_common_locked(ifp, txq, true);
   7458 }
   7459 
   7460 static void
   7461 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7462     bool is_transmit)
   7463 {
   7464 	struct wm_softc *sc = ifp->if_softc;
   7465 	struct mbuf *m0;
   7466 	struct wm_txsoft *txs;
   7467 	bus_dmamap_t dmamap;
   7468 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7469 	bus_addr_t curaddr;
   7470 	bus_size_t seglen, curlen;
   7471 	uint32_t cksumcmd;
   7472 	uint8_t cksumfields;
   7473 	bool remap = true;
   7474 
   7475 	KASSERT(mutex_owned(txq->txq_lock));
   7476 
   7477 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7478 		return;
   7479 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7480 		return;
   7481 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7482 		return;
   7483 
   7484 	/* Remember the previous number of free descriptors. */
   7485 	ofree = txq->txq_free;
   7486 
   7487 	/*
   7488 	 * Loop through the send queue, setting up transmit descriptors
   7489 	 * until we drain the queue, or use up all available transmit
   7490 	 * descriptors.
   7491 	 */
   7492 	for (;;) {
   7493 		m0 = NULL;
   7494 
   7495 		/* Get a work queue entry. */
   7496 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7497 			wm_txeof(txq, UINT_MAX);
   7498 			if (txq->txq_sfree == 0) {
   7499 				DPRINTF(WM_DEBUG_TX,
   7500 				    ("%s: TX: no free job descriptors\n",
   7501 					device_xname(sc->sc_dev)));
   7502 				WM_Q_EVCNT_INCR(txq, txsstall);
   7503 				break;
   7504 			}
   7505 		}
   7506 
   7507 		/* Grab a packet off the queue. */
   7508 		if (is_transmit)
   7509 			m0 = pcq_get(txq->txq_interq);
   7510 		else
   7511 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7512 		if (m0 == NULL)
   7513 			break;
   7514 
   7515 		DPRINTF(WM_DEBUG_TX,
   7516 		    ("%s: TX: have packet to transmit: %p\n",
   7517 			device_xname(sc->sc_dev), m0));
   7518 
   7519 		txs = &txq->txq_soft[txq->txq_snext];
   7520 		dmamap = txs->txs_dmamap;
   7521 
   7522 		use_tso = (m0->m_pkthdr.csum_flags &
   7523 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7524 
   7525 		/*
   7526 		 * So says the Linux driver:
   7527 		 * The controller does a simple calculation to make sure
   7528 		 * there is enough room in the FIFO before initiating the
   7529 		 * DMA for each buffer. The calc is:
   7530 		 *	4 = ceil(buffer len / MSS)
   7531 		 * To make sure we don't overrun the FIFO, adjust the max
   7532 		 * buffer len if the MSS drops.
   7533 		 */
   7534 		dmamap->dm_maxsegsz =
   7535 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7536 		    ? m0->m_pkthdr.segsz << 2
   7537 		    : WTX_MAX_LEN;
   7538 
   7539 		/*
   7540 		 * Load the DMA map.  If this fails, the packet either
   7541 		 * didn't fit in the allotted number of segments, or we
   7542 		 * were short on resources.  For the too-many-segments
   7543 		 * case, we simply report an error and drop the packet,
   7544 		 * since we can't sanely copy a jumbo packet to a single
   7545 		 * buffer.
   7546 		 */
   7547 retry:
   7548 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7549 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7550 		if (__predict_false(error)) {
   7551 			if (error == EFBIG) {
   7552 				if (remap == true) {
   7553 					struct mbuf *m;
   7554 
   7555 					remap = false;
   7556 					m = m_defrag(m0, M_NOWAIT);
   7557 					if (m != NULL) {
   7558 						WM_Q_EVCNT_INCR(txq, defrag);
   7559 						m0 = m;
   7560 						goto retry;
   7561 					}
   7562 				}
   7563 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7564 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7565 				    "DMA segments, dropping...\n",
   7566 				    device_xname(sc->sc_dev));
   7567 				wm_dump_mbuf_chain(sc, m0);
   7568 				m_freem(m0);
   7569 				continue;
   7570 			}
    7571 			/* Short on resources, just stop for now. */
   7572 			DPRINTF(WM_DEBUG_TX,
   7573 			    ("%s: TX: dmamap load failed: %d\n",
   7574 				device_xname(sc->sc_dev), error));
   7575 			break;
   7576 		}
   7577 
   7578 		segs_needed = dmamap->dm_nsegs;
   7579 		if (use_tso) {
   7580 			/* For sentinel descriptor; see below. */
   7581 			segs_needed++;
   7582 		}
   7583 
   7584 		/*
   7585 		 * Ensure we have enough descriptors free to describe
   7586 		 * the packet. Note, we always reserve one descriptor
   7587 		 * at the end of the ring due to the semantics of the
   7588 		 * TDT register, plus one more in the event we need
   7589 		 * to load offload context.
   7590 		 */
   7591 		if (segs_needed > txq->txq_free - 2) {
   7592 			/*
   7593 			 * Not enough free descriptors to transmit this
   7594 			 * packet.  We haven't committed anything yet,
   7595 			 * so just unload the DMA map, put the packet
    7596 			 * back on the queue, and punt. Notify the upper
   7597 			 * layer that there are no more slots left.
   7598 			 */
   7599 			DPRINTF(WM_DEBUG_TX,
   7600 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7601 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7602 				segs_needed, txq->txq_free - 1));
   7603 			if (!is_transmit)
   7604 				ifp->if_flags |= IFF_OACTIVE;
   7605 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7606 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7607 			WM_Q_EVCNT_INCR(txq, txdstall);
   7608 			break;
   7609 		}
   7610 
   7611 		/*
   7612 		 * Check for 82547 Tx FIFO bug. We need to do this
   7613 		 * once we know we can transmit the packet, since we
   7614 		 * do some internal FIFO space accounting here.
   7615 		 */
   7616 		if (sc->sc_type == WM_T_82547 &&
   7617 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7618 			DPRINTF(WM_DEBUG_TX,
   7619 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7620 				device_xname(sc->sc_dev)));
   7621 			if (!is_transmit)
   7622 				ifp->if_flags |= IFF_OACTIVE;
   7623 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7624 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7625 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7626 			break;
   7627 		}
   7628 
   7629 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7630 
   7631 		DPRINTF(WM_DEBUG_TX,
   7632 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7633 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7634 
   7635 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7636 
   7637 		/*
   7638 		 * Store a pointer to the packet so that we can free it
   7639 		 * later.
   7640 		 *
   7641 		 * Initially, we consider the number of descriptors the
   7642 		 * packet uses the number of DMA segments.  This may be
   7643 		 * incremented by 1 if we do checksum offload (a descriptor
   7644 		 * is used to set the checksum context).
   7645 		 */
   7646 		txs->txs_mbuf = m0;
   7647 		txs->txs_firstdesc = txq->txq_next;
   7648 		txs->txs_ndesc = segs_needed;
   7649 
   7650 		/* Set up offload parameters for this packet. */
   7651 		if (m0->m_pkthdr.csum_flags &
   7652 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7653 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7654 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7655 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7656 					  &cksumfields) != 0) {
   7657 				/* Error message already displayed. */
   7658 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7659 				continue;
   7660 			}
   7661 		} else {
   7662 			cksumcmd = 0;
   7663 			cksumfields = 0;
   7664 		}
   7665 
   7666 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7667 
   7668 		/* Sync the DMA map. */
   7669 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7670 		    BUS_DMASYNC_PREWRITE);
   7671 
   7672 		/* Initialize the transmit descriptor. */
   7673 		for (nexttx = txq->txq_next, seg = 0;
   7674 		     seg < dmamap->dm_nsegs; seg++) {
   7675 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7676 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7677 			     seglen != 0;
   7678 			     curaddr += curlen, seglen -= curlen,
   7679 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7680 				curlen = seglen;
   7681 
   7682 				/*
   7683 				 * So says the Linux driver:
   7684 				 * Work around for premature descriptor
   7685 				 * write-backs in TSO mode.  Append a
   7686 				 * 4-byte sentinel descriptor.
   7687 				 */
   7688 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7689 				    curlen > 8)
   7690 					curlen -= 4;
   7691 
   7692 				wm_set_dma_addr(
   7693 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7694 				txq->txq_descs[nexttx].wtx_cmdlen
   7695 				    = htole32(cksumcmd | curlen);
   7696 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7697 				    = 0;
   7698 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7699 				    = cksumfields;
    7700 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7701 				lasttx = nexttx;
   7702 
   7703 				DPRINTF(WM_DEBUG_TX,
   7704 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7705 					"len %#04zx\n",
   7706 					device_xname(sc->sc_dev), nexttx,
   7707 					(uint64_t)curaddr, curlen));
   7708 			}
   7709 		}
   7710 
   7711 		KASSERT(lasttx != -1);
   7712 
   7713 		/*
   7714 		 * Set up the command byte on the last descriptor of
   7715 		 * the packet. If we're in the interrupt delay window,
   7716 		 * delay the interrupt.
   7717 		 */
   7718 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7719 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7720 
   7721 		/*
   7722 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7723 		 * up the descriptor to encapsulate the packet for us.
   7724 		 *
   7725 		 * This is only valid on the last descriptor of the packet.
   7726 		 */
   7727 		if (vlan_has_tag(m0)) {
   7728 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7729 			    htole32(WTX_CMD_VLE);
   7730 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7731 			    = htole16(vlan_get_tag(m0));
   7732 		}
   7733 
   7734 		txs->txs_lastdesc = lasttx;
   7735 
   7736 		DPRINTF(WM_DEBUG_TX,
   7737 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7738 			device_xname(sc->sc_dev),
   7739 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7740 
   7741 		/* Sync the descriptors we're using. */
   7742 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7743 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7744 
   7745 		/* Give the packet to the chip. */
   7746 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7747 
   7748 		DPRINTF(WM_DEBUG_TX,
   7749 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7750 
   7751 		DPRINTF(WM_DEBUG_TX,
   7752 		    ("%s: TX: finished transmitting packet, job %d\n",
   7753 			device_xname(sc->sc_dev), txq->txq_snext));
   7754 
   7755 		/* Advance the tx pointer. */
   7756 		txq->txq_free -= txs->txs_ndesc;
   7757 		txq->txq_next = nexttx;
   7758 
   7759 		txq->txq_sfree--;
   7760 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7761 
   7762 		/* Pass the packet to any BPF listeners. */
   7763 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7764 	}
   7765 
   7766 	if (m0 != NULL) {
   7767 		if (!is_transmit)
   7768 			ifp->if_flags |= IFF_OACTIVE;
   7769 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7770 		WM_Q_EVCNT_INCR(txq, descdrop);
   7771 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7772 			__func__));
   7773 		m_freem(m0);
   7774 	}
   7775 
   7776 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7777 		/* No more slots; notify upper layer. */
   7778 		if (!is_transmit)
   7779 			ifp->if_flags |= IFF_OACTIVE;
   7780 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7781 	}
   7782 
   7783 	if (txq->txq_free != ofree) {
   7784 		/* Set a watchdog timer in case the chip flakes out. */
   7785 		txq->txq_lastsent = time_uptime;
   7786 		txq->txq_sending = true;
   7787 	}
   7788 }
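         /*
          * Note on the "segs_needed > txq_free - 2" test above: one
          * descriptor is kept in reserve because of the TDT semantics (the
          * ring may never be written completely full), and one more for the
          * checksum context descriptor that wm_tx_offload() may prepend.
          * E.g. a packet mapped to 4 DMA segments (5 with the TSO sentinel)
          * is committed only when txq_free >= 7.
          */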
   7789 
   7790 /*
   7791  * wm_nq_tx_offload:
   7792  *
   7793  *	Set up TCP/IP checksumming parameters for the
   7794  *	specified packet, for NEWQUEUE devices
   7795  */
   7796 static int
   7797 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7798     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7799 {
   7800 	struct mbuf *m0 = txs->txs_mbuf;
   7801 	uint32_t vl_len, mssidx, cmdc;
   7802 	struct ether_header *eh;
   7803 	int offset, iphl;
   7804 
   7805 	/*
   7806 	 * XXX It would be nice if the mbuf pkthdr had offset
   7807 	 * fields for the protocol headers.
   7808 	 */
   7809 	*cmdlenp = 0;
   7810 	*fieldsp = 0;
   7811 
   7812 	eh = mtod(m0, struct ether_header *);
   7813 	switch (htons(eh->ether_type)) {
   7814 	case ETHERTYPE_IP:
   7815 	case ETHERTYPE_IPV6:
   7816 		offset = ETHER_HDR_LEN;
   7817 		break;
   7818 
   7819 	case ETHERTYPE_VLAN:
   7820 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7821 		break;
   7822 
   7823 	default:
   7824 		/* Don't support this protocol or encapsulation. */
   7825 		*do_csum = false;
   7826 		return 0;
   7827 	}
   7828 	*do_csum = true;
   7829 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7830 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7831 
   7832 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7833 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7834 
   7835 	if ((m0->m_pkthdr.csum_flags &
   7836 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7837 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7838 	} else {
   7839 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7840 	}
   7841 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7842 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7843 
   7844 	if (vlan_has_tag(m0)) {
   7845 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7846 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7847 		*cmdlenp |= NQTX_CMD_VLE;
   7848 	}
   7849 
   7850 	mssidx = 0;
   7851 
   7852 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7853 		int hlen = offset + iphl;
   7854 		int tcp_hlen;
   7855 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7856 
   7857 		if (__predict_false(m0->m_len <
   7858 				    (hlen + sizeof(struct tcphdr)))) {
   7859 			/*
   7860 			 * TCP/IP headers are not in the first mbuf; we need
   7861 			 * to do this the slow and painful way. Let's just
   7862 			 * hope this doesn't happen very often.
   7863 			 */
   7864 			struct tcphdr th;
   7865 
   7866 			WM_Q_EVCNT_INCR(txq, tsopain);
   7867 
   7868 			m_copydata(m0, hlen, sizeof(th), &th);
   7869 			if (v4) {
   7870 				struct ip ip;
   7871 
   7872 				m_copydata(m0, offset, sizeof(ip), &ip);
   7873 				ip.ip_len = 0;
   7874 				m_copyback(m0,
   7875 				    offset + offsetof(struct ip, ip_len),
   7876 				    sizeof(ip.ip_len), &ip.ip_len);
   7877 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7878 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7879 			} else {
   7880 				struct ip6_hdr ip6;
   7881 
   7882 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7883 				ip6.ip6_plen = 0;
   7884 				m_copyback(m0,
   7885 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7886 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7887 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7888 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7889 			}
   7890 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7891 			    sizeof(th.th_sum), &th.th_sum);
   7892 
   7893 			tcp_hlen = th.th_off << 2;
   7894 		} else {
   7895 			/*
   7896 			 * TCP/IP headers are in the first mbuf; we can do
   7897 			 * this the easy way.
   7898 			 */
   7899 			struct tcphdr *th;
   7900 
   7901 			if (v4) {
   7902 				struct ip *ip =
   7903 				    (void *)(mtod(m0, char *) + offset);
   7904 				th = (void *)(mtod(m0, char *) + hlen);
   7905 
   7906 				ip->ip_len = 0;
   7907 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7908 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7909 			} else {
   7910 				struct ip6_hdr *ip6 =
   7911 				    (void *)(mtod(m0, char *) + offset);
   7912 				th = (void *)(mtod(m0, char *) + hlen);
   7913 
   7914 				ip6->ip6_plen = 0;
   7915 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7916 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7917 			}
   7918 			tcp_hlen = th->th_off << 2;
   7919 		}
   7920 		hlen += tcp_hlen;
   7921 		*cmdlenp |= NQTX_CMD_TSE;
   7922 
   7923 		if (v4) {
   7924 			WM_Q_EVCNT_INCR(txq, tso);
   7925 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7926 		} else {
   7927 			WM_Q_EVCNT_INCR(txq, tso6);
   7928 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7929 		}
   7930 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7931 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7932 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7933 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7934 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7935 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7936 	} else {
   7937 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7938 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7939 	}
   7940 
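	/*
	 * Translate the mbuf checksum flags into context descriptor
	 * command bits: IP4/IP6 selects the network header type and
	 * TCP/UDP the transport checksum, while the IXSM/TUXSM fields in
	 * the data descriptor request the actual checksum insertion.
	 */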
   7941 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7942 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7943 		cmdc |= NQTXC_CMD_IP4;
   7944 	}
   7945 
   7946 	if (m0->m_pkthdr.csum_flags &
   7947 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7948 		WM_Q_EVCNT_INCR(txq, tusum);
   7949 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7950 			cmdc |= NQTXC_CMD_TCP;
   7951 		else
   7952 			cmdc |= NQTXC_CMD_UDP;
   7953 
   7954 		cmdc |= NQTXC_CMD_IP4;
   7955 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7956 	}
   7957 	if (m0->m_pkthdr.csum_flags &
   7958 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7959 		WM_Q_EVCNT_INCR(txq, tusum6);
   7960 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7961 			cmdc |= NQTXC_CMD_TCP;
   7962 		else
   7963 			cmdc |= NQTXC_CMD_UDP;
   7964 
   7965 		cmdc |= NQTXC_CMD_IP6;
   7966 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7967 	}
   7968 
	/*
	 * We don't have to write a context descriptor for every packet on
	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
	 * I354, I210 and I211; writing one per Tx queue would be enough.
	 * Writing a context descriptor for every packet adds some
	 * overhead, but it does not cause problems.
	 */
   7977 	/* Fill in the context descriptor. */
   7978 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7979 	    htole32(vl_len);
   7980 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7981 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7982 	    htole32(cmdc);
   7983 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7984 	    htole32(mssidx);
   7985 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7986 	DPRINTF(WM_DEBUG_TX,
   7987 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7988 		txq->txq_next, 0, vl_len));
   7989 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7990 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7991 	txs->txs_ndesc++;
   7992 	return 0;
   7993 }
   7994 
   7995 /*
   7996  * wm_nq_start:		[ifnet interface function]
   7997  *
   7998  *	Start packet transmission on the interface for NEWQUEUE devices
   7999  */
   8000 static void
   8001 wm_nq_start(struct ifnet *ifp)
   8002 {
   8003 	struct wm_softc *sc = ifp->if_softc;
   8004 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8005 
   8006 #ifdef WM_MPSAFE
   8007 	KASSERT(if_is_mpsafe(ifp));
   8008 #endif
	/*
	 * ifp->if_obytes and ifp->if_omcasts are updated in if_transmit()
	 * in if.c.
	 */
   8012 
   8013 	mutex_enter(txq->txq_lock);
   8014 	if (!txq->txq_stopping)
   8015 		wm_nq_start_locked(ifp);
   8016 	mutex_exit(txq->txq_lock);
   8017 }
   8018 
   8019 static void
   8020 wm_nq_start_locked(struct ifnet *ifp)
   8021 {
   8022 	struct wm_softc *sc = ifp->if_softc;
   8023 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8024 
   8025 	wm_nq_send_common_locked(ifp, txq, false);
   8026 }
   8027 
   8028 static int
   8029 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8030 {
   8031 	int qid;
   8032 	struct wm_softc *sc = ifp->if_softc;
   8033 	struct wm_txqueue *txq;
   8034 
   8035 	qid = wm_select_txqueue(ifp, m);
   8036 	txq = &sc->sc_queue[qid].wmq_txq;
   8037 
   8038 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8039 		m_freem(m);
   8040 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8041 		return ENOBUFS;
   8042 	}
   8043 
   8044 	/*
   8045 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   8046 	 */
   8047 	ifp->if_obytes += m->m_pkthdr.len;
   8048 	if (m->m_flags & M_MCAST)
   8049 		ifp->if_omcasts++;
   8050 
	/*
	 * There are two situations in which this mutex_tryenter() can
	 * fail at run time:
	 *     (1) contention with the interrupt handler
	 *         (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get
	 * stuck. In case (2), the last packet enqueued to txq->txq_interq
	 * is also dequeued by wm_deferred_start_locked(), so it does not
	 * get stuck, either.
	 */
   8062 	if (mutex_tryenter(txq->txq_lock)) {
   8063 		if (!txq->txq_stopping)
   8064 			wm_nq_transmit_locked(ifp, txq);
   8065 		mutex_exit(txq->txq_lock);
   8066 	}
   8067 
   8068 	return 0;
   8069 }
   8070 
   8071 static void
   8072 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8073 {
   8074 
   8075 	wm_nq_send_common_locked(ifp, txq, true);
   8076 }
   8077 
   8078 static void
   8079 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8080     bool is_transmit)
   8081 {
   8082 	struct wm_softc *sc = ifp->if_softc;
   8083 	struct mbuf *m0;
   8084 	struct wm_txsoft *txs;
   8085 	bus_dmamap_t dmamap;
   8086 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8087 	bool do_csum, sent;
   8088 	bool remap = true;
   8089 
   8090 	KASSERT(mutex_owned(txq->txq_lock));
   8091 
   8092 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8093 		return;
   8094 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8095 		return;
   8096 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8097 		return;
   8098 
   8099 	sent = false;
   8100 
   8101 	/*
   8102 	 * Loop through the send queue, setting up transmit descriptors
   8103 	 * until we drain the queue, or use up all available transmit
   8104 	 * descriptors.
   8105 	 */
   8106 	for (;;) {
   8107 		m0 = NULL;
   8108 
   8109 		/* Get a work queue entry. */
   8110 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8111 			wm_txeof(txq, UINT_MAX);
   8112 			if (txq->txq_sfree == 0) {
   8113 				DPRINTF(WM_DEBUG_TX,
   8114 				    ("%s: TX: no free job descriptors\n",
   8115 					device_xname(sc->sc_dev)));
   8116 				WM_Q_EVCNT_INCR(txq, txsstall);
   8117 				break;
   8118 			}
   8119 		}
   8120 
   8121 		/* Grab a packet off the queue. */
   8122 		if (is_transmit)
   8123 			m0 = pcq_get(txq->txq_interq);
   8124 		else
   8125 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8126 		if (m0 == NULL)
   8127 			break;
   8128 
   8129 		DPRINTF(WM_DEBUG_TX,
   8130 		    ("%s: TX: have packet to transmit: %p\n",
   8131 		    device_xname(sc->sc_dev), m0));
   8132 
   8133 		txs = &txq->txq_soft[txq->txq_snext];
   8134 		dmamap = txs->txs_dmamap;
   8135 
   8136 		/*
   8137 		 * Load the DMA map.  If this fails, the packet either
   8138 		 * didn't fit in the allotted number of segments, or we
   8139 		 * were short on resources.  For the too-many-segments
   8140 		 * case, we simply report an error and drop the packet,
   8141 		 * since we can't sanely copy a jumbo packet to a single
   8142 		 * buffer.
   8143 		 */
   8144 retry:
   8145 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8146 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8147 		if (__predict_false(error)) {
   8148 			if (error == EFBIG) {
   8149 				if (remap == true) {
   8150 					struct mbuf *m;
   8151 
   8152 					remap = false;
   8153 					m = m_defrag(m0, M_NOWAIT);
   8154 					if (m != NULL) {
   8155 						WM_Q_EVCNT_INCR(txq, defrag);
   8156 						m0 = m;
   8157 						goto retry;
   8158 					}
   8159 				}
   8160 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8161 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8162 				    "DMA segments, dropping...\n",
   8163 				    device_xname(sc->sc_dev));
   8164 				wm_dump_mbuf_chain(sc, m0);
   8165 				m_freem(m0);
   8166 				continue;
   8167 			}
   8168 			/* Short on resources, just stop for now. */
   8169 			DPRINTF(WM_DEBUG_TX,
   8170 			    ("%s: TX: dmamap load failed: %d\n",
   8171 				device_xname(sc->sc_dev), error));
   8172 			break;
   8173 		}
   8174 
   8175 		segs_needed = dmamap->dm_nsegs;
   8176 
   8177 		/*
   8178 		 * Ensure we have enough descriptors free to describe
   8179 		 * the packet. Note, we always reserve one descriptor
   8180 		 * at the end of the ring due to the semantics of the
   8181 		 * TDT register, plus one more in the event we need
   8182 		 * to load offload context.
   8183 		 */
   8184 		if (segs_needed > txq->txq_free - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map and punt; the
			 * packet is dropped by the cleanup code after
			 * the loop. Notify the upper layer that there
			 * are no more slots left.
			 */
   8192 			DPRINTF(WM_DEBUG_TX,
   8193 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8194 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
				segs_needed, txq->txq_free - 2));
   8196 			if (!is_transmit)
   8197 				ifp->if_flags |= IFF_OACTIVE;
   8198 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8199 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8200 			WM_Q_EVCNT_INCR(txq, txdstall);
   8201 			break;
   8202 		}
   8203 
   8204 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8205 
   8206 		DPRINTF(WM_DEBUG_TX,
   8207 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8208 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8209 
   8210 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8211 
		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may
		 * be incremented by one if we do checksum offload (a
		 * descriptor is used to set up the checksum context).
		 */
   8221 		txs->txs_mbuf = m0;
   8222 		txs->txs_firstdesc = txq->txq_next;
   8223 		txs->txs_ndesc = segs_needed;
   8224 
   8225 		/* Set up offload parameters for this packet. */
   8226 		uint32_t cmdlen, fields, dcmdlen;
   8227 		if (m0->m_pkthdr.csum_flags &
   8228 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8229 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8230 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8231 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8232 			    &do_csum) != 0) {
   8233 				/* Error message already displayed. */
   8234 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8235 				continue;
   8236 			}
   8237 		} else {
   8238 			do_csum = false;
   8239 			cmdlen = 0;
   8240 			fields = 0;
   8241 		}
   8242 
   8243 		/* Sync the DMA map. */
   8244 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8245 		    BUS_DMASYNC_PREWRITE);
   8246 
   8247 		/* Initialize the first transmit descriptor. */
   8248 		nexttx = txq->txq_next;
   8249 		if (!do_csum) {
   8250 			/* setup a legacy descriptor */
   8251 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8252 			    dmamap->dm_segs[0].ds_addr);
   8253 			txq->txq_descs[nexttx].wtx_cmdlen =
   8254 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8255 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8256 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8257 			if (vlan_has_tag(m0)) {
   8258 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8259 				    htole32(WTX_CMD_VLE);
   8260 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8261 				    htole16(vlan_get_tag(m0));
   8262 			} else
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8264 
   8265 			dcmdlen = 0;
   8266 		} else {
   8267 			/* setup an advanced data descriptor */
   8268 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8269 			    htole64(dmamap->dm_segs[0].ds_addr);
   8270 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8271 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8273 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8274 			    htole32(fields);
   8275 			DPRINTF(WM_DEBUG_TX,
   8276 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8277 				device_xname(sc->sc_dev), nexttx,
   8278 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8279 			DPRINTF(WM_DEBUG_TX,
   8280 			    ("\t 0x%08x%08x\n", fields,
   8281 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8282 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8283 		}
   8284 
   8285 		lasttx = nexttx;
   8286 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the next descriptors. The legacy and advanced
		 * formats are laid out the same here.
		 */
   8291 		for (seg = 1; seg < dmamap->dm_nsegs;
   8292 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8293 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8294 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8295 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8296 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8297 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8298 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8299 			lasttx = nexttx;
   8300 
   8301 			DPRINTF(WM_DEBUG_TX,
   8302 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8303 				device_xname(sc->sc_dev), nexttx,
   8304 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8305 				dmamap->dm_segs[seg].ds_len));
   8306 		}
   8307 
   8308 		KASSERT(lasttx != -1);
   8309 
   8310 		/*
   8311 		 * Set up the command byte on the last descriptor of
   8312 		 * the packet. If we're in the interrupt delay window,
   8313 		 * delay the interrupt.
   8314 		 */
   8315 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8316 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
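		/*
		 * The EOP and RS bits are at the same positions in both
		 * the legacy and the advanced descriptor layouts (the
		 * KASSERT above checks this), so it is safe to set them
		 * through the legacy view of the descriptor.
		 */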
   8317 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8318 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8319 
   8320 		txs->txs_lastdesc = lasttx;
   8321 
   8322 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8323 		    device_xname(sc->sc_dev),
   8324 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8325 
   8326 		/* Sync the descriptors we're using. */
   8327 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8328 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8329 
   8330 		/* Give the packet to the chip. */
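		/*
		 * Writing the new tail index to the TDT register passes
		 * ownership of every descriptor up to (but not including)
		 * nexttx to the hardware.
		 */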
   8331 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8332 		sent = true;
   8333 
   8334 		DPRINTF(WM_DEBUG_TX,
   8335 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8336 
   8337 		DPRINTF(WM_DEBUG_TX,
   8338 		    ("%s: TX: finished transmitting packet, job %d\n",
   8339 			device_xname(sc->sc_dev), txq->txq_snext));
   8340 
   8341 		/* Advance the tx pointer. */
   8342 		txq->txq_free -= txs->txs_ndesc;
   8343 		txq->txq_next = nexttx;
   8344 
   8345 		txq->txq_sfree--;
   8346 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8347 
   8348 		/* Pass the packet to any BPF listeners. */
   8349 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8350 	}
   8351 
   8352 	if (m0 != NULL) {
   8353 		if (!is_transmit)
   8354 			ifp->if_flags |= IFF_OACTIVE;
   8355 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8356 		WM_Q_EVCNT_INCR(txq, descdrop);
   8357 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8358 			__func__));
   8359 		m_freem(m0);
   8360 	}
   8361 
   8362 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8363 		/* No more slots; notify upper layer. */
   8364 		if (!is_transmit)
   8365 			ifp->if_flags |= IFF_OACTIVE;
   8366 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8367 	}
   8368 
   8369 	if (sent) {
   8370 		/* Set a watchdog timer in case the chip flakes out. */
   8371 		txq->txq_lastsent = time_uptime;
   8372 		txq->txq_sending = true;
   8373 	}
   8374 }
   8375 
   8376 static void
   8377 wm_deferred_start_locked(struct wm_txqueue *txq)
   8378 {
   8379 	struct wm_softc *sc = txq->txq_sc;
   8380 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8381 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8382 	int qid = wmq->wmq_id;
   8383 
   8384 	KASSERT(mutex_owned(txq->txq_lock));
   8385 
   8386 	if (txq->txq_stopping) {
   8387 		mutex_exit(txq->txq_lock);
   8388 		return;
   8389 	}
   8390 
   8391 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
   8393 		if (qid == 0)
   8394 			wm_nq_start_locked(ifp);
   8395 		wm_nq_transmit_locked(ifp, txq);
   8396 	} else {
		/* XXX needed for ALTQ or single-CPU systems */
   8398 		if (qid == 0)
   8399 			wm_start_locked(ifp);
   8400 		wm_transmit_locked(ifp, txq);
   8401 	}
   8402 }
   8403 
   8404 /* Interrupt */
   8405 
   8406 /*
   8407  * wm_txeof:
   8408  *
   8409  *	Helper; handle transmit interrupts.
   8410  */
   8411 static bool
   8412 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8413 {
   8414 	struct wm_softc *sc = txq->txq_sc;
   8415 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8416 	struct wm_txsoft *txs;
   8417 	int count = 0;
   8418 	int i;
   8419 	uint8_t status;
   8420 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8421 	bool more = false;
   8422 
   8423 	KASSERT(mutex_owned(txq->txq_lock));
   8424 
   8425 	if (txq->txq_stopping)
   8426 		return false;
   8427 
   8428 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) Ethernet controllers */
   8430 	if (wmq->wmq_id == 0)
   8431 		ifp->if_flags &= ~IFF_OACTIVE;
   8432 
   8433 	/*
   8434 	 * Go through the Tx list and free mbufs for those
   8435 	 * frames which have been transmitted.
   8436 	 */
   8437 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8438 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8439 		if (limit-- == 0) {
   8440 			more = true;
   8441 			DPRINTF(WM_DEBUG_TX,
   8442 			    ("%s: TX: loop limited, job %d is not processed\n",
   8443 				device_xname(sc->sc_dev), i));
   8444 			break;
   8445 		}
   8446 
   8447 		txs = &txq->txq_soft[i];
   8448 
   8449 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8450 			device_xname(sc->sc_dev), i));
   8451 
   8452 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8453 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8454 
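
		/*
		 * The chip sets the DD (descriptor done) bit in the last
		 * descriptor of a job once the whole packet has been
		 * transmitted. If it is not set yet, stop scanning and
		 * sync the descriptor back for the next read.
		 */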
   8455 		status =
   8456 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8457 		if ((status & WTX_ST_DD) == 0) {
   8458 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8459 			    BUS_DMASYNC_PREREAD);
   8460 			break;
   8461 		}
   8462 
   8463 		count++;
   8464 		DPRINTF(WM_DEBUG_TX,
   8465 		    ("%s: TX: job %d done: descs %d..%d\n",
   8466 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8467 		    txs->txs_lastdesc));
   8468 
   8469 		/*
   8470 		 * XXX We should probably be using the statistics
   8471 		 * XXX registers, but I don't know if they exist
   8472 		 * XXX on chips before the i82544.
   8473 		 */
   8474 
   8475 #ifdef WM_EVENT_COUNTERS
   8476 		if (status & WTX_ST_TU)
   8477 			WM_Q_EVCNT_INCR(txq, underrun);
   8478 #endif /* WM_EVENT_COUNTERS */
   8479 
		/*
		 * The documentation for the 82574 and newer says that the
		 * status field has neither an EC (Excessive Collision) bit
		 * nor an LC (Late Collision) bit (both are reserved). See
		 * the "PCIe GbE Controller Open Source Software Developer's
		 * Manual", the 82574 datasheet and newer ones.
		 *
		 * XXX I saw the LC bit set on an I218 even though the media
		 * was full duplex, so the bit might be used for some other
		 * meaning ... (I have no documentation for it).
		 */
   8490 
   8491 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8492 		    && ((sc->sc_type < WM_T_82574)
   8493 			|| (sc->sc_type == WM_T_80003))) {
   8494 			ifp->if_oerrors++;
   8495 			if (status & WTX_ST_LC)
   8496 				log(LOG_WARNING, "%s: late collision\n",
   8497 				    device_xname(sc->sc_dev));
   8498 			else if (status & WTX_ST_EC) {
   8499 				ifp->if_collisions +=
   8500 				    TX_COLLISION_THRESHOLD + 1;
   8501 				log(LOG_WARNING, "%s: excessive collisions\n",
   8502 				    device_xname(sc->sc_dev));
   8503 			}
   8504 		} else
   8505 			ifp->if_opackets++;
   8506 
   8507 		txq->txq_packets++;
   8508 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8509 
   8510 		txq->txq_free += txs->txs_ndesc;
   8511 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8512 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8513 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8514 		m_freem(txs->txs_mbuf);
   8515 		txs->txs_mbuf = NULL;
   8516 	}
   8517 
   8518 	/* Update the dirty transmit buffer pointer. */
   8519 	txq->txq_sdirty = i;
   8520 	DPRINTF(WM_DEBUG_TX,
   8521 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8522 
   8523 	if (count != 0)
   8524 		rnd_add_uint32(&sc->rnd_source, count);
   8525 
   8526 	/*
   8527 	 * If there are no more pending transmissions, cancel the watchdog
   8528 	 * timer.
   8529 	 */
   8530 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8531 		txq->txq_sending = false;
   8532 
   8533 	return more;
   8534 }
   8535 
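/*
 * The accessors below hide the three receive descriptor layouts this
 * driver deals with: the legacy format, the 82574 extended format and
 * the NEWQUEUE (82575 and newer) advanced format.
 */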
   8536 static inline uint32_t
   8537 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8538 {
   8539 	struct wm_softc *sc = rxq->rxq_sc;
   8540 
   8541 	if (sc->sc_type == WM_T_82574)
   8542 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8543 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8544 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8545 	else
   8546 		return rxq->rxq_descs[idx].wrx_status;
   8547 }
   8548 
   8549 static inline uint32_t
   8550 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8551 {
   8552 	struct wm_softc *sc = rxq->rxq_sc;
   8553 
   8554 	if (sc->sc_type == WM_T_82574)
   8555 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8556 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8557 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8558 	else
   8559 		return rxq->rxq_descs[idx].wrx_errors;
   8560 }
   8561 
   8562 static inline uint16_t
   8563 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8564 {
   8565 	struct wm_softc *sc = rxq->rxq_sc;
   8566 
   8567 	if (sc->sc_type == WM_T_82574)
   8568 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8569 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8570 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8571 	else
   8572 		return rxq->rxq_descs[idx].wrx_special;
   8573 }
   8574 
   8575 static inline int
   8576 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8577 {
   8578 	struct wm_softc *sc = rxq->rxq_sc;
   8579 
   8580 	if (sc->sc_type == WM_T_82574)
   8581 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8582 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8583 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8584 	else
   8585 		return rxq->rxq_descs[idx].wrx_len;
   8586 }
   8587 
   8588 #ifdef WM_DEBUG
   8589 static inline uint32_t
   8590 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8591 {
   8592 	struct wm_softc *sc = rxq->rxq_sc;
   8593 
   8594 	if (sc->sc_type == WM_T_82574)
   8595 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8596 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8597 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8598 	else
   8599 		return 0;
   8600 }
   8601 
   8602 static inline uint8_t
   8603 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8604 {
   8605 	struct wm_softc *sc = rxq->rxq_sc;
   8606 
   8607 	if (sc->sc_type == WM_T_82574)
   8608 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8609 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8610 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8611 	else
   8612 		return 0;
   8613 }
   8614 #endif /* WM_DEBUG */
   8615 
   8616 static inline bool
   8617 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8618     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8619 {
   8620 
   8621 	if (sc->sc_type == WM_T_82574)
   8622 		return (status & ext_bit) != 0;
   8623 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8624 		return (status & nq_bit) != 0;
   8625 	else
   8626 		return (status & legacy_bit) != 0;
   8627 }
   8628 
   8629 static inline bool
   8630 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8631     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8632 {
   8633 
   8634 	if (sc->sc_type == WM_T_82574)
   8635 		return (error & ext_bit) != 0;
   8636 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8637 		return (error & nq_bit) != 0;
   8638 	else
   8639 		return (error & legacy_bit) != 0;
   8640 }
   8641 
   8642 static inline bool
   8643 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8644 {
   8645 
   8646 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8647 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8648 		return true;
   8649 	else
   8650 		return false;
   8651 }
   8652 
   8653 static inline bool
   8654 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8655 {
   8656 	struct wm_softc *sc = rxq->rxq_sc;
   8657 
   8658 	/* XXXX missing error bit for newqueue? */
   8659 	if (wm_rxdesc_is_set_error(sc, errors,
   8660 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8661 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8662 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8663 		NQRXC_ERROR_RXE)) {
   8664 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8665 		    EXTRXC_ERROR_SE, 0))
   8666 			log(LOG_WARNING, "%s: symbol error\n",
   8667 			    device_xname(sc->sc_dev));
   8668 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8669 		    EXTRXC_ERROR_SEQ, 0))
   8670 			log(LOG_WARNING, "%s: receive sequence error\n",
   8671 			    device_xname(sc->sc_dev));
   8672 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8673 		    EXTRXC_ERROR_CE, 0))
   8674 			log(LOG_WARNING, "%s: CRC error\n",
   8675 			    device_xname(sc->sc_dev));
   8676 		return true;
   8677 	}
   8678 
   8679 	return false;
   8680 }
   8681 
   8682 static inline bool
   8683 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8684 {
   8685 	struct wm_softc *sc = rxq->rxq_sc;
   8686 
   8687 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8688 		NQRXC_STATUS_DD)) {
   8689 		/* We have processed all of the receive descriptors. */
   8690 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8691 		return false;
   8692 	}
   8693 
   8694 	return true;
   8695 }
   8696 
   8697 static inline bool
   8698 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8699     uint16_t vlantag, struct mbuf *m)
   8700 {
   8701 
   8702 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8703 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8704 		vlan_set_tag(m, le16toh(vlantag));
   8705 	}
   8706 
   8707 	return true;
   8708 }
   8709 
   8710 static inline void
   8711 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8712     uint32_t errors, struct mbuf *m)
   8713 {
   8714 	struct wm_softc *sc = rxq->rxq_sc;
   8715 
   8716 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8717 		if (wm_rxdesc_is_set_status(sc, status,
   8718 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8719 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8720 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8721 			if (wm_rxdesc_is_set_error(sc, errors,
   8722 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8723 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8724 		}
   8725 		if (wm_rxdesc_is_set_status(sc, status,
   8726 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8727 			/*
   8728 			 * Note: we don't know if this was TCP or UDP,
   8729 			 * so we just set both bits, and expect the
   8730 			 * upper layers to deal.
   8731 			 */
   8732 			WM_Q_EVCNT_INCR(rxq, tusum);
   8733 			m->m_pkthdr.csum_flags |=
   8734 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8735 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8736 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8737 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8738 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8739 		}
   8740 	}
   8741 }
   8742 
   8743 /*
   8744  * wm_rxeof:
   8745  *
   8746  *	Helper; handle receive interrupts.
   8747  */
   8748 static bool
   8749 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8750 {
   8751 	struct wm_softc *sc = rxq->rxq_sc;
   8752 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8753 	struct wm_rxsoft *rxs;
   8754 	struct mbuf *m;
   8755 	int i, len;
   8756 	int count = 0;
   8757 	uint32_t status, errors;
   8758 	uint16_t vlantag;
   8759 	bool more = false;
   8760 
   8761 	KASSERT(mutex_owned(rxq->rxq_lock));
   8762 
   8763 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8764 		if (limit-- == 0) {
   8765 			rxq->rxq_ptr = i;
   8766 			more = true;
   8767 			DPRINTF(WM_DEBUG_RX,
   8768 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8769 				device_xname(sc->sc_dev), i));
   8770 			break;
   8771 		}
   8772 
   8773 		rxs = &rxq->rxq_soft[i];
   8774 
   8775 		DPRINTF(WM_DEBUG_RX,
   8776 		    ("%s: RX: checking descriptor %d\n",
   8777 			device_xname(sc->sc_dev), i));
   8778 		wm_cdrxsync(rxq, i,
   8779 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8780 
   8781 		status = wm_rxdesc_get_status(rxq, i);
   8782 		errors = wm_rxdesc_get_errors(rxq, i);
   8783 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8784 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8785 #ifdef WM_DEBUG
   8786 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8787 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8788 #endif
   8789 
   8790 		if (!wm_rxdesc_dd(rxq, i, status)) {
			/*
			 * Update the receive pointer while still holding
			 * rxq_lock, keeping it consistent with the
			 * counters updated under the same lock.
			 */
   8795 			rxq->rxq_ptr = i;
   8796 			break;
   8797 		}
   8798 
   8799 		count++;
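		/*
		 * rxq_discard is set when part of a multi-descriptor frame
		 * has already been dropped; keep dropping descriptors
		 * until we see the one with EOP set.
		 */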
   8800 		if (__predict_false(rxq->rxq_discard)) {
   8801 			DPRINTF(WM_DEBUG_RX,
   8802 			    ("%s: RX: discarding contents of descriptor %d\n",
   8803 				device_xname(sc->sc_dev), i));
   8804 			wm_init_rxdesc(rxq, i);
   8805 			if (wm_rxdesc_is_eop(rxq, status)) {
   8806 				/* Reset our state. */
   8807 				DPRINTF(WM_DEBUG_RX,
   8808 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8809 					device_xname(sc->sc_dev)));
   8810 				rxq->rxq_discard = 0;
   8811 			}
   8812 			continue;
   8813 		}
   8814 
   8815 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8816 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8817 
   8818 		m = rxs->rxs_mbuf;
   8819 
   8820 		/*
   8821 		 * Add a new receive buffer to the ring, unless of
   8822 		 * course the length is zero. Treat the latter as a
   8823 		 * failed mapping.
   8824 		 */
   8825 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8826 			/*
   8827 			 * Failed, throw away what we've done so
   8828 			 * far, and discard the rest of the packet.
   8829 			 */
   8830 			ifp->if_ierrors++;
   8831 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8832 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8833 			wm_init_rxdesc(rxq, i);
   8834 			if (!wm_rxdesc_is_eop(rxq, status))
   8835 				rxq->rxq_discard = 1;
   8836 			if (rxq->rxq_head != NULL)
   8837 				m_freem(rxq->rxq_head);
   8838 			WM_RXCHAIN_RESET(rxq);
   8839 			DPRINTF(WM_DEBUG_RX,
   8840 			    ("%s: RX: Rx buffer allocation failed, "
   8841 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8842 				rxq->rxq_discard ? " (discard)" : ""));
   8843 			continue;
   8844 		}
   8845 
   8846 		m->m_len = len;
   8847 		rxq->rxq_len += len;
   8848 		DPRINTF(WM_DEBUG_RX,
   8849 		    ("%s: RX: buffer at %p len %d\n",
   8850 			device_xname(sc->sc_dev), m->m_data, len));
   8851 
   8852 		/* If this is not the end of the packet, keep looking. */
   8853 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8854 			WM_RXCHAIN_LINK(rxq, m);
   8855 			DPRINTF(WM_DEBUG_RX,
   8856 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8857 				device_xname(sc->sc_dev), rxq->rxq_len));
   8858 			continue;
   8859 		}
   8860 
		/*
		 * Okay, we have the entire packet now. The chip is
		 * configured to include the FCS except on the I350, I354
		 * and I21[01] (not all chips can be configured to strip
		 * it), so we need to trim it ourselves. We may also need
		 * to adjust the length of the previous mbuf in the chain
		 * if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so the hardware
		 * strips the FCS and we don't trim it here.
		 */
   8871 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8872 		    && (sc->sc_type != WM_T_I210)
   8873 		    && (sc->sc_type != WM_T_I211)) {
   8874 			if (m->m_len < ETHER_CRC_LEN) {
   8875 				rxq->rxq_tail->m_len
   8876 				    -= (ETHER_CRC_LEN - m->m_len);
   8877 				m->m_len = 0;
   8878 			} else
   8879 				m->m_len -= ETHER_CRC_LEN;
   8880 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8881 		} else
   8882 			len = rxq->rxq_len;
   8883 
   8884 		WM_RXCHAIN_LINK(rxq, m);
   8885 
   8886 		*rxq->rxq_tailp = NULL;
   8887 		m = rxq->rxq_head;
   8888 
   8889 		WM_RXCHAIN_RESET(rxq);
   8890 
   8891 		DPRINTF(WM_DEBUG_RX,
   8892 		    ("%s: RX: have entire packet, len -> %d\n",
   8893 			device_xname(sc->sc_dev), len));
   8894 
   8895 		/* If an error occurred, update stats and drop the packet. */
   8896 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8897 			m_freem(m);
   8898 			continue;
   8899 		}
   8900 
   8901 		/* No errors.  Receive the packet. */
   8902 		m_set_rcvif(m, ifp);
   8903 		m->m_pkthdr.len = len;
		/*
		 * TODO
		 * We should save the rsshash and rsstype in this mbuf.
		 */
   8908 		DPRINTF(WM_DEBUG_RX,
   8909 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8910 			device_xname(sc->sc_dev), rsstype, rsshash));
   8911 
   8912 		/*
   8913 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8914 		 * for us.  Associate the tag with the packet.
   8915 		 */
   8916 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8917 			continue;
   8918 
   8919 		/* Set up checksum info for this packet. */
   8920 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
		/*
		 * Update the receive pointer while still holding rxq_lock,
		 * keeping it consistent with the counters updated under
		 * the same lock.
		 */
   8925 		rxq->rxq_ptr = i;
   8926 		rxq->rxq_packets++;
   8927 		rxq->rxq_bytes += len;
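		/*
		 * Drop rxq_lock while the packet is passed to the network
		 * stack, then re-take it and re-check rxq_stopping before
		 * continuing the scan.
		 */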
   8928 		mutex_exit(rxq->rxq_lock);
   8929 
   8930 		/* Pass it on. */
   8931 		if_percpuq_enqueue(sc->sc_ipq, m);
   8932 
   8933 		mutex_enter(rxq->rxq_lock);
   8934 
   8935 		if (rxq->rxq_stopping)
   8936 			break;
   8937 	}
   8938 
   8939 	if (count != 0)
   8940 		rnd_add_uint32(&sc->rnd_source, count);
   8941 
   8942 	DPRINTF(WM_DEBUG_RX,
   8943 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8944 
   8945 	return more;
   8946 }
   8947 
   8948 /*
   8949  * wm_linkintr_gmii:
   8950  *
   8951  *	Helper; handle link interrupts for GMII.
   8952  */
   8953 static void
   8954 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8955 {
   8956 	uint32_t status, reg;
   8957 	bool link;
   8958 
   8959 	KASSERT(WM_CORE_LOCKED(sc));
   8960 
   8961 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8962 		__func__));
   8963 
   8964 	if ((icr & ICR_LSC) == 0) {
   8965 		if (icr & ICR_RXSEQ)
   8966 			DPRINTF(WM_DEBUG_LINK,
   8967 			    ("%s: LINK Receive sequence error\n",
   8968 				device_xname(sc->sc_dev)));
   8969 		return;
   8970 	}
   8971 
   8972 	/* Link status changed */
   8973 	status = CSR_READ(sc, WMREG_STATUS);
   8974 	link = status & STATUS_LU;
   8975 	if (link)
   8976 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8977 			device_xname(sc->sc_dev),
   8978 			(status & STATUS_FD) ? "FDX" : "HDX"));
   8979 	else
   8980 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8981 			device_xname(sc->sc_dev)));
   8982 	if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8983 		wm_gig_downshift_workaround_ich8lan(sc);
   8984 
   8985 	if ((sc->sc_type == WM_T_ICH8)
   8986 	    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8987 		wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8988 	}
   8989 	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8990 		device_xname(sc->sc_dev)));
   8991 	mii_pollstat(&sc->sc_mii);
   8992 	if (sc->sc_type == WM_T_82543) {
   8993 		int miistatus, active;
   8994 
   8995 		/*
   8996 		 * With 82543, we need to force speed and
   8997 		 * duplex on the MAC equal to what the PHY
   8998 		 * speed and duplex configuration is.
   8999 		 */
   9000 		miistatus = sc->sc_mii.mii_media_status;
   9001 
   9002 		if (miistatus & IFM_ACTIVE) {
   9003 			active = sc->sc_mii.mii_media_active;
   9004 			sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9005 			switch (IFM_SUBTYPE(active)) {
   9006 			case IFM_10_T:
   9007 				sc->sc_ctrl |= CTRL_SPEED_10;
   9008 				break;
   9009 			case IFM_100_TX:
   9010 				sc->sc_ctrl |= CTRL_SPEED_100;
   9011 				break;
   9012 			case IFM_1000_T:
   9013 				sc->sc_ctrl |= CTRL_SPEED_1000;
   9014 				break;
   9015 			default:
   9016 				/*
   9017 				 * fiber?
				 * Fiber?
				 * Should not enter here.
   9020 				printf("unknown media (%x)\n", active);
   9021 				break;
   9022 			}
   9023 			if (active & IFM_FDX)
   9024 				sc->sc_ctrl |= CTRL_FD;
   9025 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9026 		}
   9027 	} else if (sc->sc_type == WM_T_PCH) {
   9028 		wm_k1_gig_workaround_hv(sc,
   9029 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9030 	}
   9031 
   9032 	/*
   9033 	 * I217 Packet Loss issue:
   9034 	 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9035 	 * on power up.
   9036 	 * Set the Beacon Duration for I217 to 8 usec
   9037 	 */
   9038 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9039 		reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9040 		reg &= ~FEXTNVM4_BEACON_DURATION;
   9041 		reg |= FEXTNVM4_BEACON_DURATION_8US;
   9042 		CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9043 	}
   9044 
   9045 	/* Work-around I218 hang issue */
   9046 	if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9047 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9048 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9049 	    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9050 		wm_k1_workaround_lpt_lp(sc, link);
   9051 
   9052 	if (sc->sc_type >= WM_T_PCH_LPT) {
   9053 		/*
   9054 		 * Set platform power management values for Latency
   9055 		 * Tolerance Reporting (LTR)
   9056 		 */
   9057 		wm_platform_pm_pch_lpt(sc,
   9058 		    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9059 	}
   9060 
   9061 	/* Clear link partner's EEE ability */
   9062 	sc->eee_lp_ability = 0;
   9063 
   9064 	/* FEXTNVM6 K1-off workaround */
   9065 	if (sc->sc_type == WM_T_PCH_SPT) {
   9066 		reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9067 		if (CSR_READ(sc, WMREG_PCIEANACFG) & FEXTNVM6_K1_OFF_ENABLE)
   9068 			reg |= FEXTNVM6_K1_OFF_ENABLE;
   9069 		else
   9070 			reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9071 		CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9072 	}
   9073 
   9074 	if (!link)
   9075 		return;
   9076 
   9077 	switch (sc->sc_type) {
   9078 	case WM_T_PCH2:
   9079 		wm_k1_workaround_lv(sc);
   9080 		/* FALLTHROUGH */
   9081 	case WM_T_PCH:
   9082 		if (sc->sc_phytype == WMPHY_82578)
   9083 			wm_link_stall_workaround_hv(sc);
   9084 		break;
   9085 	default:
   9086 		break;
   9087 	}
   9088 
   9089 	/* Enable/Disable EEE after link up */
   9090 	if (sc->sc_phytype > WMPHY_82579)
   9091 		wm_set_eee_pchlan(sc);
   9092 }
   9093 
   9094 /*
   9095  * wm_linkintr_tbi:
   9096  *
   9097  *	Helper; handle link interrupts for TBI mode.
   9098  */
   9099 static void
   9100 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9101 {
   9102 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9103 	uint32_t status;
   9104 
   9105 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9106 		__func__));
   9107 
   9108 	status = CSR_READ(sc, WMREG_STATUS);
   9109 	if (icr & ICR_LSC) {
   9110 		wm_check_for_link(sc);
   9111 		if (status & STATUS_LU) {
   9112 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9113 				device_xname(sc->sc_dev),
   9114 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9115 			/*
   9116 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9117 			 * so we should update sc->sc_ctrl
   9118 			 */
   9119 
   9120 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9121 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9122 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9123 			if (status & STATUS_FD)
   9124 				sc->sc_tctl |=
   9125 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9126 			else
   9127 				sc->sc_tctl |=
   9128 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9129 			if (sc->sc_ctrl & CTRL_TFCE)
   9130 				sc->sc_fcrtl |= FCRTL_XONE;
   9131 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9132 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9133 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9134 			sc->sc_tbi_linkup = 1;
   9135 			if_link_state_change(ifp, LINK_STATE_UP);
   9136 		} else {
   9137 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9138 				device_xname(sc->sc_dev)));
   9139 			sc->sc_tbi_linkup = 0;
   9140 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9141 		}
   9142 		/* Update LED */
   9143 		wm_tbi_serdes_set_linkled(sc);
   9144 	} else if (icr & ICR_RXSEQ)
   9145 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9146 			device_xname(sc->sc_dev)));
   9147 }
   9148 
   9149 /*
   9150  * wm_linkintr_serdes:
   9151  *
 *	Helper; handle link interrupts for SERDES mode.
   9153  */
   9154 static void
   9155 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9156 {
   9157 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9158 	struct mii_data *mii = &sc->sc_mii;
   9159 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9160 	uint32_t pcs_adv, pcs_lpab, reg;
   9161 
   9162 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9163 		__func__));
   9164 
   9165 	if (icr & ICR_LSC) {
   9166 		/* Check PCS */
   9167 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9168 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9169 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9170 				device_xname(sc->sc_dev)));
   9171 			mii->mii_media_status |= IFM_ACTIVE;
   9172 			sc->sc_tbi_linkup = 1;
   9173 			if_link_state_change(ifp, LINK_STATE_UP);
   9174 		} else {
   9175 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9176 				device_xname(sc->sc_dev)));
   9177 			mii->mii_media_status |= IFM_NONE;
   9178 			sc->sc_tbi_linkup = 0;
   9179 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9180 			wm_tbi_serdes_set_linkled(sc);
   9181 			return;
   9182 		}
   9183 		mii->mii_media_active |= IFM_1000_SX;
   9184 		if ((reg & PCS_LSTS_FDX) != 0)
   9185 			mii->mii_media_active |= IFM_FDX;
   9186 		else
   9187 			mii->mii_media_active |= IFM_HDX;
   9188 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9189 			/* Check flow */
   9190 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9191 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9192 				DPRINTF(WM_DEBUG_LINK,
   9193 				    ("XXX LINKOK but not ACOMP\n"));
   9194 				return;
   9195 			}
   9196 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9197 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9198 			DPRINTF(WM_DEBUG_LINK,
   9199 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
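			/*
			 * Resolve flow control as in the usual IEEE 802.3
			 * pause resolution: symmetric PAUSE on both sides
			 * enables flow control in both directions, while
			 * the asymmetric combinations below enable Tx-only
			 * or Rx-only pause.
			 */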
   9200 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9201 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9202 				mii->mii_media_active |= IFM_FLOW
   9203 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9204 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9205 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9206 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9207 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9208 				mii->mii_media_active |= IFM_FLOW
   9209 				    | IFM_ETH_TXPAUSE;
   9210 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9211 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9212 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9213 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9214 				mii->mii_media_active |= IFM_FLOW
   9215 				    | IFM_ETH_RXPAUSE;
   9216 		}
   9217 		/* Update LED */
   9218 		wm_tbi_serdes_set_linkled(sc);
   9219 	} else
   9220 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9221 		    device_xname(sc->sc_dev)));
   9222 }
   9223 
   9224 /*
   9225  * wm_linkintr:
   9226  *
   9227  *	Helper; handle link interrupts.
   9228  */
   9229 static void
   9230 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9231 {
   9232 
   9233 	KASSERT(WM_CORE_LOCKED(sc));
   9234 
   9235 	if (sc->sc_flags & WM_F_HAS_MII)
   9236 		wm_linkintr_gmii(sc, icr);
   9237 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9238 	    && ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)))
   9239 		wm_linkintr_serdes(sc, icr);
   9240 	else
   9241 		wm_linkintr_tbi(sc, icr);
   9242 }
   9243 
   9244 /*
   9245  * wm_intr_legacy:
   9246  *
   9247  *	Interrupt service routine for INTx and MSI.
   9248  */
   9249 static int
   9250 wm_intr_legacy(void *arg)
   9251 {
   9252 	struct wm_softc *sc = arg;
   9253 	struct wm_queue *wmq = &sc->sc_queue[0];
   9254 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9255 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9256 	uint32_t icr, rndval = 0;
   9257 	int handled = 0;
   9258 
   9259 	while (1 /* CONSTCOND */) {
   9260 		icr = CSR_READ(sc, WMREG_ICR);
   9261 		if ((icr & sc->sc_icr) == 0)
   9262 			break;
   9263 		if (handled == 0)
   9264 			DPRINTF(WM_DEBUG_TX,
			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
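		/*
		 * Remember the first ICR value read; it is mixed into the
		 * rnd(9) entropy pool after the loop.
		 */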
   9266 		if (rndval == 0)
   9267 			rndval = icr;
   9268 
   9269 		mutex_enter(rxq->rxq_lock);
   9270 
   9271 		if (rxq->rxq_stopping) {
   9272 			mutex_exit(rxq->rxq_lock);
   9273 			break;
   9274 		}
   9275 
   9276 		handled = 1;
   9277 
   9278 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9279 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9280 			DPRINTF(WM_DEBUG_RX,
   9281 			    ("%s: RX: got Rx intr 0x%08x\n",
   9282 				device_xname(sc->sc_dev),
   9283 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9284 			WM_Q_EVCNT_INCR(rxq, intr);
   9285 		}
   9286 #endif
		/*
		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
		 * So, we can call wm_rxeof() in interrupt context.
		 */
   9292 		wm_rxeof(rxq, UINT_MAX);
   9293 
   9294 		mutex_exit(rxq->rxq_lock);
   9295 		mutex_enter(txq->txq_lock);
   9296 
   9297 		if (txq->txq_stopping) {
   9298 			mutex_exit(txq->txq_lock);
   9299 			break;
   9300 		}
   9301 
   9302 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9303 		if (icr & ICR_TXDW) {
   9304 			DPRINTF(WM_DEBUG_TX,
   9305 			    ("%s: TX: got TXDW interrupt\n",
   9306 				device_xname(sc->sc_dev)));
   9307 			WM_Q_EVCNT_INCR(txq, txdw);
   9308 		}
   9309 #endif
   9310 		wm_txeof(txq, UINT_MAX);
   9311 
   9312 		mutex_exit(txq->txq_lock);
   9313 		WM_CORE_LOCK(sc);
   9314 
   9315 		if (sc->sc_core_stopping) {
   9316 			WM_CORE_UNLOCK(sc);
   9317 			break;
   9318 		}
   9319 
   9320 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9321 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9322 			wm_linkintr(sc, icr);
   9323 		}
   9324 
   9325 		WM_CORE_UNLOCK(sc);
   9326 
   9327 		if (icr & ICR_RXO) {
   9328 #if defined(WM_DEBUG)
   9329 			log(LOG_WARNING, "%s: Receive overrun\n",
   9330 			    device_xname(sc->sc_dev));
   9331 #endif /* defined(WM_DEBUG) */
   9332 		}
   9333 	}
   9334 
   9335 	rnd_add_uint32(&sc->rnd_source, rndval);
   9336 
   9337 	if (handled) {
   9338 		/* Try to get more packets going. */
   9339 		softint_schedule(wmq->wmq_si);
   9340 	}
   9341 
   9342 	return handled;
   9343 }
   9344 
   9345 static inline void
   9346 wm_txrxintr_disable(struct wm_queue *wmq)
   9347 {
   9348 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9349 
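	/*
	 * Which register carries the per-queue interrupt mask depends on
	 * the generation: the 82574 uses the TXQ/RXQ bits in IMC/IMS, the
	 * 82575 uses the Tx/Rx queue bits in EIMC/EIMS, and newer devices
	 * use one EIMC/EIMS bit per MSI-X vector.
	 */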
   9350 	if (sc->sc_type == WM_T_82574)
   9351 		CSR_WRITE(sc, WMREG_IMC,
   9352 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9353 	else if (sc->sc_type == WM_T_82575)
   9354 		CSR_WRITE(sc, WMREG_EIMC,
   9355 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9356 	else
   9357 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9358 }
   9359 
   9360 static inline void
   9361 wm_txrxintr_enable(struct wm_queue *wmq)
   9362 {
   9363 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9364 
   9365 	wm_itrs_calculate(sc, wmq);
   9366 
	/*
	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled
	 * here. There is no need to care whether RXQ(0) or RXQ(1) enables
	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
	 * while its wm_handle_queue(wmq) is running.
	 */
   9373 	if (sc->sc_type == WM_T_82574)
   9374 		CSR_WRITE(sc, WMREG_IMS,
   9375 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9376 	else if (sc->sc_type == WM_T_82575)
   9377 		CSR_WRITE(sc, WMREG_EIMS,
   9378 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9379 	else
   9380 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9381 }
   9382 
   9383 static int
   9384 wm_txrxintr_msix(void *arg)
   9385 {
   9386 	struct wm_queue *wmq = arg;
   9387 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9388 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9389 	struct wm_softc *sc = txq->txq_sc;
   9390 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9391 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9392 	bool txmore;
   9393 	bool rxmore;
   9394 
   9395 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9396 
   9397 	DPRINTF(WM_DEBUG_TX,
   9398 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9399 
   9400 	wm_txrxintr_disable(wmq);
   9401 
   9402 	mutex_enter(txq->txq_lock);
   9403 
   9404 	if (txq->txq_stopping) {
   9405 		mutex_exit(txq->txq_lock);
   9406 		return 0;
   9407 	}
   9408 
   9409 	WM_Q_EVCNT_INCR(txq, txdw);
   9410 	txmore = wm_txeof(txq, txlimit);
	/* wm_deferred_start_locked() is called from wm_handle_queue(). */
   9412 	mutex_exit(txq->txq_lock);
   9413 
   9414 	DPRINTF(WM_DEBUG_RX,
   9415 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9416 	mutex_enter(rxq->rxq_lock);
   9417 
   9418 	if (rxq->rxq_stopping) {
   9419 		mutex_exit(rxq->rxq_lock);
   9420 		return 0;
   9421 	}
   9422 
   9423 	WM_Q_EVCNT_INCR(rxq, intr);
   9424 	rxmore = wm_rxeof(rxq, rxlimit);
   9425 	mutex_exit(rxq->rxq_lock);
   9426 
   9427 	wm_itrs_writereg(sc, wmq);
   9428 
   9429 	if (txmore || rxmore)
   9430 		softint_schedule(wmq->wmq_si);
   9431 	else
   9432 		wm_txrxintr_enable(wmq);
   9433 
   9434 	return 1;
   9435 }
   9436 
   9437 static void
   9438 wm_handle_queue(void *arg)
   9439 {
   9440 	struct wm_queue *wmq = arg;
   9441 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9442 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9443 	struct wm_softc *sc = txq->txq_sc;
   9444 	u_int txlimit = sc->sc_tx_process_limit;
   9445 	u_int rxlimit = sc->sc_rx_process_limit;
   9446 	bool txmore;
   9447 	bool rxmore;
   9448 
   9449 	mutex_enter(txq->txq_lock);
   9450 	if (txq->txq_stopping) {
   9451 		mutex_exit(txq->txq_lock);
   9452 		return;
   9453 	}
   9454 	txmore = wm_txeof(txq, txlimit);
   9455 	wm_deferred_start_locked(txq);
   9456 	mutex_exit(txq->txq_lock);
   9457 
   9458 	mutex_enter(rxq->rxq_lock);
   9459 	if (rxq->rxq_stopping) {
   9460 		mutex_exit(rxq->rxq_lock);
   9461 		return;
   9462 	}
   9463 	WM_Q_EVCNT_INCR(rxq, defer);
   9464 	rxmore = wm_rxeof(rxq, rxlimit);
   9465 	mutex_exit(rxq->rxq_lock);
   9466 
   9467 	if (txmore || rxmore)
   9468 		softint_schedule(wmq->wmq_si);
   9469 	else
   9470 		wm_txrxintr_enable(wmq);
   9471 }
   9472 
   9473 /*
   9474  * wm_linkintr_msix:
   9475  *
   9476  *	Interrupt service routine for link status change for MSI-X.
   9477  */
   9478 static int
   9479 wm_linkintr_msix(void *arg)
   9480 {
   9481 	struct wm_softc *sc = arg;
   9482 	uint32_t reg;
	bool has_rxo = false;
   9484 
   9485 	DPRINTF(WM_DEBUG_LINK,
   9486 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9487 
   9488 	reg = CSR_READ(sc, WMREG_ICR);
   9489 	WM_CORE_LOCK(sc);
   9490 	if (sc->sc_core_stopping)
   9491 		goto out;
   9492 
   9493 	if ((reg & ICR_LSC) != 0) {
   9494 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9495 		wm_linkintr(sc, ICR_LSC);
   9496 	}
   9497 
	/*
	 * XXX 82574 MSI-X mode workaround
	 *
	 * In 82574 MSI-X mode, the receive overrun (RXO) interrupt is
	 * delivered on the ICR_OTHER MSI-X vector; furthermore, it raises
	 * neither the ICR_RXQ(0) nor the ICR_RXQ(1) vector. So, we
	 * generate ICR_RXQ(0) and ICR_RXQ(1) interrupts by writing
	 * WMREG_ICS in order to process received packets.
	 */
   9506 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9507 #if defined(WM_DEBUG)
   9508 		log(LOG_WARNING, "%s: Receive overrun\n",
   9509 		    device_xname(sc->sc_dev));
   9510 #endif /* defined(WM_DEBUG) */
   9511 
   9512 		has_rxo = true;
		/*
		 * The RXO interrupt fires at a very high rate when receive
		 * traffic is heavy, so we handle ICR_OTHER in polling mode,
		 * as we do for the Tx/Rx interrupts. ICR_OTHER is
		 * re-enabled at the end of wm_txrxintr_msix(), which is
		 * kicked by both the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
		 */
   9520 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9521 
   9522 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9523 	}
   9524 
   9527 out:
   9528 	WM_CORE_UNLOCK(sc);
   9529 
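         	/*
         	 * Re-enable the link/"other" interrupt for this MAC type. On
         	 * the 82574, keep ICR_OTHER masked while the RXO workaround is
         	 * pending; it is re-enabled from wm_txrxintr_msix().
         	 */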
   9530 	if (sc->sc_type == WM_T_82574) {
   9531 		if (!has_rxo)
   9532 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9533 		else
   9534 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9535 	} else if (sc->sc_type == WM_T_82575)
   9536 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9537 	else
   9538 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9539 
   9540 	return 1;
   9541 }
   9542 
   9543 /*
   9544  * Media related.
   9545  * GMII, SGMII, TBI (and SERDES)
   9546  */
   9547 
   9548 /* Common */
   9549 
   9550 /*
   9551  * wm_tbi_serdes_set_linkled:
   9552  *
   9553  *	Update the link LED on TBI and SERDES devices.
   9554  */
   9555 static void
   9556 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9557 {
   9558 
   9559 	if (sc->sc_tbi_linkup)
   9560 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9561 	else
   9562 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9563 
   9564 	/* 82540 or newer devices are active low */
   9565 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9566 
   9567 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9568 }
   9569 
   9570 /* GMII related */
   9571 
   9572 /*
   9573  * wm_gmii_reset:
   9574  *
   9575  *	Reset the PHY.
   9576  */
   9577 static void
   9578 wm_gmii_reset(struct wm_softc *sc)
   9579 {
   9580 	uint32_t reg;
   9581 	int rv;
   9582 
   9583 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9584 		device_xname(sc->sc_dev), __func__));
   9585 
   9586 	rv = sc->phy.acquire(sc);
   9587 	if (rv != 0) {
   9588 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9589 		    __func__);
   9590 		return;
   9591 	}
   9592 
   9593 	switch (sc->sc_type) {
   9594 	case WM_T_82542_2_0:
   9595 	case WM_T_82542_2_1:
   9596 		/* null */
   9597 		break;
   9598 	case WM_T_82543:
   9599 		/*
    9600 		 * With the 82543, we need to force the MAC's speed and duplex
    9601 		 * to match the PHY's speed and duplex configuration. In
    9602 		 * addition, we need to pulse the PHY's hardware reset pin to
    9603 		 * take it out of reset.
   9604 		 */
   9605 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9606 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9607 
   9608 		/* The PHY reset pin is active-low. */
   9609 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9610 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9611 		    CTRL_EXT_SWDPIN(4));
   9612 		reg |= CTRL_EXT_SWDPIO(4);
   9613 
   9614 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9615 		CSR_WRITE_FLUSH(sc);
   9616 		delay(10*1000);
   9617 
   9618 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9619 		CSR_WRITE_FLUSH(sc);
   9620 		delay(150);
   9621 #if 0
   9622 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9623 #endif
   9624 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9625 		break;
   9626 	case WM_T_82544:	/* reset 10000us */
   9627 	case WM_T_82540:
   9628 	case WM_T_82545:
   9629 	case WM_T_82545_3:
   9630 	case WM_T_82546:
   9631 	case WM_T_82546_3:
   9632 	case WM_T_82541:
   9633 	case WM_T_82541_2:
   9634 	case WM_T_82547:
   9635 	case WM_T_82547_2:
   9636 	case WM_T_82571:	/* reset 100us */
   9637 	case WM_T_82572:
   9638 	case WM_T_82573:
   9639 	case WM_T_82574:
   9640 	case WM_T_82575:
   9641 	case WM_T_82576:
   9642 	case WM_T_82580:
   9643 	case WM_T_I350:
   9644 	case WM_T_I354:
   9645 	case WM_T_I210:
   9646 	case WM_T_I211:
   9647 	case WM_T_82583:
   9648 	case WM_T_80003:
   9649 		/* generic reset */
   9650 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9651 		CSR_WRITE_FLUSH(sc);
   9652 		delay(20000);
   9653 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9654 		CSR_WRITE_FLUSH(sc);
   9655 		delay(20000);
   9656 
   9657 		if ((sc->sc_type == WM_T_82541)
   9658 		    || (sc->sc_type == WM_T_82541_2)
   9659 		    || (sc->sc_type == WM_T_82547)
   9660 		    || (sc->sc_type == WM_T_82547_2)) {
    9661 			/* Workarounds for IGP are done in igp_reset() */
   9662 			/* XXX add code to set LED after phy reset */
   9663 		}
   9664 		break;
   9665 	case WM_T_ICH8:
   9666 	case WM_T_ICH9:
   9667 	case WM_T_ICH10:
   9668 	case WM_T_PCH:
   9669 	case WM_T_PCH2:
   9670 	case WM_T_PCH_LPT:
   9671 	case WM_T_PCH_SPT:
   9672 	case WM_T_PCH_CNP:
   9673 		/* generic reset */
   9674 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9675 		CSR_WRITE_FLUSH(sc);
   9676 		delay(100);
   9677 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9678 		CSR_WRITE_FLUSH(sc);
   9679 		delay(150);
   9680 		break;
   9681 	default:
   9682 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9683 		    __func__);
   9684 		break;
   9685 	}
   9686 
   9687 	sc->phy.release(sc);
   9688 
   9689 	/* get_cfg_done */
   9690 	wm_get_cfg_done(sc);
   9691 
   9692 	/* extra setup */
   9693 	switch (sc->sc_type) {
   9694 	case WM_T_82542_2_0:
   9695 	case WM_T_82542_2_1:
   9696 	case WM_T_82543:
   9697 	case WM_T_82544:
   9698 	case WM_T_82540:
   9699 	case WM_T_82545:
   9700 	case WM_T_82545_3:
   9701 	case WM_T_82546:
   9702 	case WM_T_82546_3:
   9703 	case WM_T_82541_2:
   9704 	case WM_T_82547_2:
   9705 	case WM_T_82571:
   9706 	case WM_T_82572:
   9707 	case WM_T_82573:
   9708 	case WM_T_82574:
   9709 	case WM_T_82583:
   9710 	case WM_T_82575:
   9711 	case WM_T_82576:
   9712 	case WM_T_82580:
   9713 	case WM_T_I350:
   9714 	case WM_T_I354:
   9715 	case WM_T_I210:
   9716 	case WM_T_I211:
   9717 	case WM_T_80003:
   9718 		/* null */
   9719 		break;
   9720 	case WM_T_82541:
   9721 	case WM_T_82547:
    9722 		/* XXX Actively configure the LED after PHY reset */
   9723 		break;
   9724 	case WM_T_ICH8:
   9725 	case WM_T_ICH9:
   9726 	case WM_T_ICH10:
   9727 	case WM_T_PCH:
   9728 	case WM_T_PCH2:
   9729 	case WM_T_PCH_LPT:
   9730 	case WM_T_PCH_SPT:
   9731 	case WM_T_PCH_CNP:
   9732 		wm_phy_post_reset(sc);
   9733 		break;
   9734 	default:
   9735 		panic("%s: unknown type\n", __func__);
   9736 		break;
   9737 	}
   9738 }
   9739 
   9740 /*
    9741  * Set up sc_phytype and mii_{read|write}reg.
    9742  *
    9743  *  To identify the PHY type, the correct read/write functions must be
    9744  * selected first. Selecting them requires the PCI ID or the MAC type,
    9745  * without accessing PHY registers.
    9746  *
    9747  *  On the first call of this function, the PHY ID is not known yet, so
    9748  * check the PCI ID or the MAC type. The list of PCI IDs may not be
    9749  * complete, so the result might be incorrect.
    9750  *
    9751  *  On the second call, the PHY OUI and model are used to identify the
    9752  * PHY type. This might still not be perfect because some comparison
    9753  * entries are missing, but it is better than the first call.
    9754  *
    9755  *  If the newly detected result differs from the previous assumption,
    9756  * a diagnostic message is printed.
   9757  */
   9758 static void
   9759 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9760     uint16_t phy_model)
   9761 {
   9762 	device_t dev = sc->sc_dev;
   9763 	struct mii_data *mii = &sc->sc_mii;
   9764 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9765 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9766 	mii_readreg_t new_readreg;
   9767 	mii_writereg_t new_writereg;
   9768 
   9769 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9770 		device_xname(sc->sc_dev), __func__));
   9771 
   9772 	if (mii->mii_readreg == NULL) {
   9773 		/*
   9774 		 *  This is the first call of this function. For ICH and PCH
   9775 		 * variants, it's difficult to determine the PHY access method
   9776 		 * by sc_type, so use the PCI product ID for some devices.
   9777 		 */
   9778 
   9779 		switch (sc->sc_pcidevid) {
   9780 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9781 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9782 			/* 82577 */
   9783 			new_phytype = WMPHY_82577;
   9784 			break;
   9785 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9786 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9787 			/* 82578 */
   9788 			new_phytype = WMPHY_82578;
   9789 			break;
   9790 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9791 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9792 			/* 82579 */
   9793 			new_phytype = WMPHY_82579;
   9794 			break;
   9795 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9796 		case PCI_PRODUCT_INTEL_82801I_BM:
   9797 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9798 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9799 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9800 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9801 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9802 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9803 			/* ICH8, 9, 10 with 82567 */
   9804 			new_phytype = WMPHY_BM;
   9805 			break;
   9806 		default:
   9807 			break;
   9808 		}
   9809 	} else {
   9810 		/* It's not the first call. Use PHY OUI and model */
   9811 		switch (phy_oui) {
   9812 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9813 			switch (phy_model) {
   9814 			case 0x0004: /* XXX */
   9815 				new_phytype = WMPHY_82578;
   9816 				break;
   9817 			default:
   9818 				break;
   9819 			}
   9820 			break;
   9821 		case MII_OUI_xxMARVELL:
   9822 			switch (phy_model) {
   9823 			case MII_MODEL_xxMARVELL_I210:
   9824 				new_phytype = WMPHY_I210;
   9825 				break;
   9826 			case MII_MODEL_xxMARVELL_E1011:
   9827 			case MII_MODEL_xxMARVELL_E1000_3:
   9828 			case MII_MODEL_xxMARVELL_E1000_5:
   9829 			case MII_MODEL_xxMARVELL_E1112:
   9830 				new_phytype = WMPHY_M88;
   9831 				break;
   9832 			case MII_MODEL_xxMARVELL_E1149:
   9833 				new_phytype = WMPHY_BM;
   9834 				break;
   9835 			case MII_MODEL_xxMARVELL_E1111:
   9836 			case MII_MODEL_xxMARVELL_I347:
   9837 			case MII_MODEL_xxMARVELL_E1512:
   9838 			case MII_MODEL_xxMARVELL_E1340M:
   9839 			case MII_MODEL_xxMARVELL_E1543:
   9840 				new_phytype = WMPHY_M88;
   9841 				break;
   9842 			case MII_MODEL_xxMARVELL_I82563:
   9843 				new_phytype = WMPHY_GG82563;
   9844 				break;
   9845 			default:
   9846 				break;
   9847 			}
   9848 			break;
   9849 		case MII_OUI_INTEL:
   9850 			switch (phy_model) {
   9851 			case MII_MODEL_INTEL_I82577:
   9852 				new_phytype = WMPHY_82577;
   9853 				break;
   9854 			case MII_MODEL_INTEL_I82579:
   9855 				new_phytype = WMPHY_82579;
   9856 				break;
   9857 			case MII_MODEL_INTEL_I217:
   9858 				new_phytype = WMPHY_I217;
   9859 				break;
   9860 			case MII_MODEL_INTEL_I82580:
   9861 			case MII_MODEL_INTEL_I350:
   9862 				new_phytype = WMPHY_82580;
   9863 				break;
   9864 			default:
   9865 				break;
   9866 			}
   9867 			break;
   9868 		case MII_OUI_yyINTEL:
   9869 			switch (phy_model) {
   9870 			case MII_MODEL_yyINTEL_I82562G:
   9871 			case MII_MODEL_yyINTEL_I82562EM:
   9872 			case MII_MODEL_yyINTEL_I82562ET:
   9873 				new_phytype = WMPHY_IFE;
   9874 				break;
   9875 			case MII_MODEL_yyINTEL_IGP01E1000:
   9876 				new_phytype = WMPHY_IGP;
   9877 				break;
   9878 			case MII_MODEL_yyINTEL_I82566:
   9879 				new_phytype = WMPHY_IGP_3;
   9880 				break;
   9881 			default:
   9882 				break;
   9883 			}
   9884 			break;
   9885 		default:
   9886 			break;
   9887 		}
   9888 		if (new_phytype == WMPHY_UNKNOWN)
   9889 			aprint_verbose_dev(dev,
   9890 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   9891 			    __func__, phy_oui, phy_model);
   9892 
   9893 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9894 		    && (sc->sc_phytype != new_phytype)) {
    9895 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9896 			    "was incorrect. PHY type from PHY ID = %u\n",
   9897 			    sc->sc_phytype, new_phytype);
   9898 		}
   9899 	}
   9900 
   9901 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9902 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9903 		/* SGMII */
   9904 		new_readreg = wm_sgmii_readreg;
   9905 		new_writereg = wm_sgmii_writereg;
    9906 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9907 		/* BM2 (phyaddr == 1) */
   9908 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9909 		    && (new_phytype != WMPHY_BM)
   9910 		    && (new_phytype != WMPHY_UNKNOWN))
   9911 			doubt_phytype = new_phytype;
   9912 		new_phytype = WMPHY_BM;
   9913 		new_readreg = wm_gmii_bm_readreg;
   9914 		new_writereg = wm_gmii_bm_writereg;
   9915 	} else if (sc->sc_type >= WM_T_PCH) {
   9916 		/* All PCH* use _hv_ */
   9917 		new_readreg = wm_gmii_hv_readreg;
   9918 		new_writereg = wm_gmii_hv_writereg;
   9919 	} else if (sc->sc_type >= WM_T_ICH8) {
   9920 		/* non-82567 ICH8, 9 and 10 */
   9921 		new_readreg = wm_gmii_i82544_readreg;
   9922 		new_writereg = wm_gmii_i82544_writereg;
   9923 	} else if (sc->sc_type >= WM_T_80003) {
   9924 		/* 80003 */
   9925 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9926 		    && (new_phytype != WMPHY_GG82563)
   9927 		    && (new_phytype != WMPHY_UNKNOWN))
   9928 			doubt_phytype = new_phytype;
   9929 		new_phytype = WMPHY_GG82563;
   9930 		new_readreg = wm_gmii_i80003_readreg;
   9931 		new_writereg = wm_gmii_i80003_writereg;
   9932 	} else if (sc->sc_type >= WM_T_I210) {
   9933 		/* I210 and I211 */
   9934 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9935 		    && (new_phytype != WMPHY_I210)
   9936 		    && (new_phytype != WMPHY_UNKNOWN))
   9937 			doubt_phytype = new_phytype;
   9938 		new_phytype = WMPHY_I210;
   9939 		new_readreg = wm_gmii_gs40g_readreg;
   9940 		new_writereg = wm_gmii_gs40g_writereg;
   9941 	} else if (sc->sc_type >= WM_T_82580) {
   9942 		/* 82580, I350 and I354 */
   9943 		new_readreg = wm_gmii_82580_readreg;
   9944 		new_writereg = wm_gmii_82580_writereg;
   9945 	} else if (sc->sc_type >= WM_T_82544) {
    9946 		/* 82544, 8254[0], 8254[56], 8254[17], 8257[1234] and 82583 */
   9947 		new_readreg = wm_gmii_i82544_readreg;
   9948 		new_writereg = wm_gmii_i82544_writereg;
   9949 	} else {
   9950 		new_readreg = wm_gmii_i82543_readreg;
   9951 		new_writereg = wm_gmii_i82543_writereg;
   9952 	}
   9953 
   9954 	if (new_phytype == WMPHY_BM) {
   9955 		/* All BM use _bm_ */
   9956 		new_readreg = wm_gmii_bm_readreg;
   9957 		new_writereg = wm_gmii_bm_writereg;
   9958 	}
   9959 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9960 		/* All PCH* use _hv_ */
   9961 		new_readreg = wm_gmii_hv_readreg;
   9962 		new_writereg = wm_gmii_hv_writereg;
   9963 	}
   9964 
   9965 	/* Diag output */
   9966 	if (doubt_phytype != WMPHY_UNKNOWN)
   9967 		aprint_error_dev(dev, "Assumed new PHY type was "
   9968 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9969 		    new_phytype);
   9970 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9971 	    && (sc->sc_phytype != new_phytype))
    9972 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9973 		    "was incorrect. New PHY type = %u\n",
   9974 		    sc->sc_phytype, new_phytype);
   9975 
   9976 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9977 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9978 
   9979 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9980 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9981 		    "function was incorrect.\n");
   9982 
   9983 	/* Update now */
   9984 	sc->sc_phytype = new_phytype;
   9985 	mii->mii_readreg = new_readreg;
   9986 	mii->mii_writereg = new_writereg;
   9987 	if (new_readreg == wm_gmii_hv_readreg) {
   9988 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   9989 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   9990 	} else if (new_readreg == wm_sgmii_readreg) {
   9991 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   9992 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   9993 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   9994 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   9995 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   9996 	}
   9997 }
   9998 
   9999 /*
   10000  * wm_get_phy_id_82575:
   10001  *
    10002  * Return the PHY ID, or -1 on failure.
   10003  */
   10004 static int
   10005 wm_get_phy_id_82575(struct wm_softc *sc)
   10006 {
   10007 	uint32_t reg;
   10008 	int phyid = -1;
   10009 
   10010 	/* XXX */
   10011 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10012 		return -1;
   10013 
   10014 	if (wm_sgmii_uses_mdio(sc)) {
   10015 		switch (sc->sc_type) {
   10016 		case WM_T_82575:
   10017 		case WM_T_82576:
   10018 			reg = CSR_READ(sc, WMREG_MDIC);
   10019 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10020 			break;
   10021 		case WM_T_82580:
   10022 		case WM_T_I350:
   10023 		case WM_T_I354:
   10024 		case WM_T_I210:
   10025 		case WM_T_I211:
   10026 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10027 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10028 			break;
   10029 		default:
   10030 			return -1;
   10031 		}
   10032 	}
   10033 
   10034 	return phyid;
   10035 }
   10036 
   10037 
   10038 /*
   10039  * wm_gmii_mediainit:
   10040  *
   10041  *	Initialize media for use on 1000BASE-T devices.
   10042  */
   10043 static void
   10044 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10045 {
   10046 	device_t dev = sc->sc_dev;
   10047 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10048 	struct mii_data *mii = &sc->sc_mii;
   10049 	uint32_t reg;
   10050 
   10051 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10052 		device_xname(sc->sc_dev), __func__));
   10053 
   10054 	/* We have GMII. */
   10055 	sc->sc_flags |= WM_F_HAS_MII;
   10056 
   10057 	if (sc->sc_type == WM_T_80003)
    10058 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10059 	else
   10060 		sc->sc_tipg = TIPG_1000T_DFLT;
   10061 
   10062 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10063 	if ((sc->sc_type == WM_T_82580)
   10064 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10065 	    || (sc->sc_type == WM_T_I211)) {
   10066 		reg = CSR_READ(sc, WMREG_PHPM);
   10067 		reg &= ~PHPM_GO_LINK_D;
   10068 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10069 	}
   10070 
   10071 	/*
   10072 	 * Let the chip set speed/duplex on its own based on
   10073 	 * signals from the PHY.
   10074 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10075 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10076 	 */
   10077 	sc->sc_ctrl |= CTRL_SLU;
   10078 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10079 
   10080 	/* Initialize our media structures and probe the GMII. */
   10081 	mii->mii_ifp = ifp;
   10082 
   10083 	mii->mii_statchg = wm_gmii_statchg;
   10084 
   10085 	/* get PHY control from SMBus to PCIe */
   10086 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10087 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10088 	    || (sc->sc_type == WM_T_PCH_CNP))
   10089 		wm_init_phy_workarounds_pchlan(sc);
   10090 
   10091 	wm_gmii_reset(sc);
   10092 
   10093 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10094 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10095 	    wm_gmii_mediastatus);
   10096 
   10097 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10098 	    || (sc->sc_type == WM_T_82580)
   10099 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10100 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10101 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10102 			/* Attach only one port */
   10103 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10104 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10105 		} else {
   10106 			int i, id;
   10107 			uint32_t ctrl_ext;
   10108 
   10109 			id = wm_get_phy_id_82575(sc);
   10110 			if (id != -1) {
   10111 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10112 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10113 			}
   10114 			if ((id == -1)
   10115 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10116 				/* Power on the SGMII PHY if it is disabled */
   10117 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10118 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    10119 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   10120 				CSR_WRITE_FLUSH(sc);
   10121 				delay(300*1000); /* XXX too long */
   10122 
    10123 				/* Probe PHY addresses 1 through 7 */
   10124 				for (i = 1; i < 8; i++)
   10125 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10126 					    0xffffffff, i, MII_OFFSET_ANY,
   10127 					    MIIF_DOPAUSE);
   10128 
    10129 				/* Restore the previous SFP cage power state */
   10130 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10131 			}
   10132 		}
   10133 	} else
   10134 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10135 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10136 
   10137 	/*
   10138 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   10139 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   10140 	 */
   10141 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10142 		|| (sc->sc_type == WM_T_PCH_SPT)
   10143 		|| (sc->sc_type == WM_T_PCH_CNP))
   10144 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10145 		wm_set_mdio_slow_mode_hv(sc);
   10146 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10147 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10148 	}
   10149 
   10150 	/*
   10151 	 * (For ICH8 variants)
   10152 	 * If PHY detection failed, use BM's r/w function and retry.
   10153 	 */
   10154 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10155 		/* if failed, retry with *_bm_* */
   10156 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10157 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10158 		    sc->sc_phytype);
   10159 		sc->sc_phytype = WMPHY_BM;
   10160 		mii->mii_readreg = wm_gmii_bm_readreg;
   10161 		mii->mii_writereg = wm_gmii_bm_writereg;
   10162 
   10163 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10164 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10165 	}
   10166 
   10167 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10168 		/* No PHY was found */
   10169 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10170 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10171 		sc->sc_phytype = WMPHY_NONE;
   10172 	} else {
   10173 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10174 
   10175 		/*
   10176 		 * PHY Found! Check PHY type again by the second call of
   10177 		 * wm_gmii_setup_phytype.
   10178 		 */
   10179 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10180 		    child->mii_mpd_model);
   10181 
   10182 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10183 	}
   10184 }
   10185 
   10186 /*
   10187  * wm_gmii_mediachange:	[ifmedia interface function]
   10188  *
   10189  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10190  */
   10191 static int
   10192 wm_gmii_mediachange(struct ifnet *ifp)
   10193 {
   10194 	struct wm_softc *sc = ifp->if_softc;
   10195 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10196 	int rc;
   10197 
   10198 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10199 		device_xname(sc->sc_dev), __func__));
   10200 	if ((ifp->if_flags & IFF_UP) == 0)
   10201 		return 0;
   10202 
   10203 	/* Disable D0 LPLU. */
   10204 	wm_lplu_d0_disable(sc);
   10205 
   10206 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10207 	sc->sc_ctrl |= CTRL_SLU;
   10208 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10209 	    || (sc->sc_type > WM_T_82543)) {
   10210 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10211 	} else {
   10212 		sc->sc_ctrl &= ~CTRL_ASDE;
   10213 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10214 		if (ife->ifm_media & IFM_FDX)
   10215 			sc->sc_ctrl |= CTRL_FD;
   10216 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10217 		case IFM_10_T:
   10218 			sc->sc_ctrl |= CTRL_SPEED_10;
   10219 			break;
   10220 		case IFM_100_TX:
   10221 			sc->sc_ctrl |= CTRL_SPEED_100;
   10222 			break;
   10223 		case IFM_1000_T:
   10224 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10225 			break;
   10226 		case IFM_NONE:
   10227 			/* There is no specific setting for IFM_NONE */
   10228 			break;
   10229 		default:
   10230 			panic("wm_gmii_mediachange: bad media 0x%x",
   10231 			    ife->ifm_media);
   10232 		}
   10233 	}
   10234 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10235 	CSR_WRITE_FLUSH(sc);
   10236 	if (sc->sc_type <= WM_T_82543)
   10237 		wm_gmii_reset(sc);
   10238 
   10239 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10240 		return 0;
   10241 	return rc;
   10242 }
   10243 
   10244 /*
   10245  * wm_gmii_mediastatus:	[ifmedia interface function]
   10246  *
   10247  *	Get the current interface media status on a 1000BASE-T device.
   10248  */
   10249 static void
   10250 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10251 {
   10252 	struct wm_softc *sc = ifp->if_softc;
   10253 
   10254 	ether_mediastatus(ifp, ifmr);
   10255 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10256 	    | sc->sc_flowflags;
   10257 }
   10258 
   10259 #define	MDI_IO		CTRL_SWDPIN(2)
   10260 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10261 #define	MDI_CLK		CTRL_SWDPIN(3)
   10262 
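          /*
           * wm_i82543_mii_sendbits:
           *
           *	Clock out the `nbits' most significant bits of `data', MSB
           *	first, on the bit-banged MDIO interface of the i82543,
           *	pulsing MDI_CLK around each bit.
           */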
   10263 static void
   10264 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10265 {
   10266 	uint32_t i, v;
   10267 
   10268 	v = CSR_READ(sc, WMREG_CTRL);
   10269 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10270 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10271 
   10272 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10273 		if (data & i)
   10274 			v |= MDI_IO;
   10275 		else
   10276 			v &= ~MDI_IO;
   10277 		CSR_WRITE(sc, WMREG_CTRL, v);
   10278 		CSR_WRITE_FLUSH(sc);
   10279 		delay(10);
   10280 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10281 		CSR_WRITE_FLUSH(sc);
   10282 		delay(10);
   10283 		CSR_WRITE(sc, WMREG_CTRL, v);
   10284 		CSR_WRITE_FLUSH(sc);
   10285 		delay(10);
   10286 	}
   10287 }
   10288 
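          /*
           * wm_i82543_mii_recvbits:
           *
           *	Clock the turnaround cycles and then a 16-bit value, MSB
           *	first, in from the bit-banged MDIO interface of the i82543.
           */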
   10289 static uint16_t
   10290 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10291 {
   10292 	uint32_t v, i;
   10293 	uint16_t data = 0;
   10294 
   10295 	v = CSR_READ(sc, WMREG_CTRL);
   10296 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10297 	v |= CTRL_SWDPIO(3);
   10298 
   10299 	CSR_WRITE(sc, WMREG_CTRL, v);
   10300 	CSR_WRITE_FLUSH(sc);
   10301 	delay(10);
   10302 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10303 	CSR_WRITE_FLUSH(sc);
   10304 	delay(10);
   10305 	CSR_WRITE(sc, WMREG_CTRL, v);
   10306 	CSR_WRITE_FLUSH(sc);
   10307 	delay(10);
   10308 
   10309 	for (i = 0; i < 16; i++) {
   10310 		data <<= 1;
   10311 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10312 		CSR_WRITE_FLUSH(sc);
   10313 		delay(10);
   10314 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10315 			data |= 1;
   10316 		CSR_WRITE(sc, WMREG_CTRL, v);
   10317 		CSR_WRITE_FLUSH(sc);
   10318 		delay(10);
   10319 	}
   10320 
   10321 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10322 	CSR_WRITE_FLUSH(sc);
   10323 	delay(10);
   10324 	CSR_WRITE(sc, WMREG_CTRL, v);
   10325 	CSR_WRITE_FLUSH(sc);
   10326 	delay(10);
   10327 
   10328 	return data;
   10329 }
   10330 
   10331 #undef MDI_IO
   10332 #undef MDI_DIR
   10333 #undef MDI_CLK
   10334 
   10335 /*
   10336  * wm_gmii_i82543_readreg:	[mii interface function]
   10337  *
   10338  *	Read a PHY register on the GMII (i82543 version).
   10339  */
   10340 static int
   10341 wm_gmii_i82543_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10342 {
   10343 	struct wm_softc *sc = device_private(dev);
   10344 
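          	/*
          	 * Bit-bang an MDIO read frame: a 32-bit preamble of ones, then
          	 * a 14-bit field holding (MSB first) the start and read
          	 * opcodes, the PHY address and the register address. The
          	 * turnaround and the 16 data bits are clocked in by
          	 * wm_i82543_mii_recvbits().
          	 */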
   10345 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10346 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10347 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10348 	*val = wm_i82543_mii_recvbits(sc) & 0xffff;
   10349 
   10350 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04hx\n",
   10351 		device_xname(dev), phy, reg, *val));
   10352 
   10353 	return 0;
   10354 }
   10355 
   10356 /*
   10357  * wm_gmii_i82543_writereg:	[mii interface function]
   10358  *
   10359  *	Write a PHY register on the GMII (i82543 version).
   10360  */
   10361 static int
   10362 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, uint16_t val)
   10363 {
   10364 	struct wm_softc *sc = device_private(dev);
   10365 
   10366 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10367 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10368 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10369 	    (MII_COMMAND_START << 30), 32);
   10370 
   10371 	return 0;
   10372 }
   10373 
   10374 /*
   10375  * wm_gmii_mdic_readreg:	[mii interface function]
   10376  *
   10377  *	Read a PHY register on the GMII.
   10378  */
   10379 static int
   10380 wm_gmii_mdic_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10381 {
   10382 	struct wm_softc *sc = device_private(dev);
   10383 	uint32_t mdic = 0;
   10384 	int i;
   10385 
   10386 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10387 	    && (reg > MII_ADDRMASK)) {
   10388 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10389 		    __func__, sc->sc_phytype, reg);
   10390 		reg &= MII_ADDRMASK;
   10391 	}
   10392 
   10393 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10394 	    MDIC_REGADD(reg));
   10395 
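          	/* Poll for the transaction to complete (50us per iteration). */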
   10396 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10397 		delay(50);
   10398 		mdic = CSR_READ(sc, WMREG_MDIC);
   10399 		if (mdic & MDIC_READY)
   10400 			break;
   10401 	}
   10402 
   10403 	if ((mdic & MDIC_READY) == 0) {
   10404 		DPRINTF(WM_DEBUG_GMII,
   10405 		    ("%s: MDIC read timed out: phy %d reg %d\n",
   10406 			device_xname(dev), phy, reg));
   10407 		return ETIMEDOUT;
   10408 	} else if (mdic & MDIC_E) {
   10409 		/* This is normal if no PHY is present. */
   10410 		DPRINTF(WM_DEBUG_GMII, ("%s: MDIC read error: phy %d reg %d\n",
   10411 			device_xname(sc->sc_dev), phy, reg));
   10412 		return -1;
   10413 	} else
   10414 		*val = MDIC_DATA(mdic);
   10415 
   10416 	/*
   10417 	 * Allow some time after each MDIC transaction to avoid
   10418 	 * reading duplicate data in the next MDIC transaction.
   10419 	 */
   10420 	if (sc->sc_type == WM_T_PCH2)
   10421 		delay(100);
   10422 
   10423 	return 0;
   10424 }
   10425 
   10426 /*
   10427  * wm_gmii_mdic_writereg:	[mii interface function]
   10428  *
   10429  *	Write a PHY register on the GMII.
   10430  */
   10431 static int
   10432 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, uint16_t val)
   10433 {
   10434 	struct wm_softc *sc = device_private(dev);
   10435 	uint32_t mdic = 0;
   10436 	int i;
   10437 
   10438 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10439 	    && (reg > MII_ADDRMASK)) {
   10440 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10441 		    __func__, sc->sc_phytype, reg);
   10442 		reg &= MII_ADDRMASK;
   10443 	}
   10444 
   10445 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10446 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10447 
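          	/* Poll for the transaction to complete (50us per iteration). */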
   10448 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10449 		delay(50);
   10450 		mdic = CSR_READ(sc, WMREG_MDIC);
   10451 		if (mdic & MDIC_READY)
   10452 			break;
   10453 	}
   10454 
   10455 	if ((mdic & MDIC_READY) == 0) {
   10456 		DPRINTF(WM_DEBUG_GMII,
   10457 		    ("%s: MDIC write timed out: phy %d reg %d\n",
   10458 			device_xname(dev), phy, reg));
   10459 		return ETIMEDOUT;
   10460 	} else if (mdic & MDIC_E) {
   10461 		DPRINTF(WM_DEBUG_GMII,
   10462 		    ("%s: MDIC write error: phy %d reg %d\n",
   10463 			device_xname(dev), phy, reg));
   10464 		return -1;
   10465 	}
   10466 
   10467 	/*
   10468 	 * Allow some time after each MDIC transaction to avoid
   10469 	 * reading duplicate data in the next MDIC transaction.
   10470 	 */
   10471 	if (sc->sc_type == WM_T_PCH2)
   10472 		delay(100);
   10473 
   10474 	return 0;
   10475 }
   10476 
   10477 /*
   10478  * wm_gmii_i82544_readreg:	[mii interface function]
   10479  *
   10480  *	Read a PHY register on the GMII.
   10481  */
   10482 static int
   10483 wm_gmii_i82544_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10484 {
   10485 	struct wm_softc *sc = device_private(dev);
   10486 	int rv;
   10487 
   10488 	if (sc->phy.acquire(sc)) {
   10489 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10490 		return -1;
   10491 	}
   10492 
   10493 	rv = wm_gmii_i82544_readreg_locked(dev, phy, reg, val);
   10494 
   10495 	sc->phy.release(sc);
   10496 
   10497 	return rv;
   10498 }
   10499 
   10500 static int
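          /*
           * wm_gmii_i82544_readreg_locked:
           *
           *	Like wm_gmii_i82544_readreg(), but assumes the caller already
           *	holds the PHY semaphore. For IGP PHYs, registers above
           *	BME1000_MAX_MULTI_PAGE_REG are reached by writing the page to
           *	MII_IGPHY_PAGE_SELECT first.
           */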
   10501 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10502 {
   10503 	struct wm_softc *sc = device_private(dev);
   10504 
   10505 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10506 		switch (sc->sc_phytype) {
   10507 		case WMPHY_IGP:
   10508 		case WMPHY_IGP_2:
   10509 		case WMPHY_IGP_3:
   10510 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10511 			    reg);
   10512 			break;
   10513 		default:
   10514 #ifdef WM_DEBUG
   10515 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10516 			    __func__, sc->sc_phytype, reg);
   10517 #endif
   10518 			break;
   10519 		}
   10520 	}
   10521 
   10522 	wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10523 
   10524 	return 0;
   10525 }
   10526 
   10527 /*
   10528  * wm_gmii_i82544_writereg:	[mii interface function]
   10529  *
   10530  *	Write a PHY register on the GMII.
   10531  */
   10532 static int
   10533 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, uint16_t val)
   10534 {
   10535 	struct wm_softc *sc = device_private(dev);
   10536 	int rv;
   10537 
   10538 	if (sc->phy.acquire(sc)) {
   10539 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10540 		return -1;
   10541 	}
   10542 
   10543 	rv = wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10544 	sc->phy.release(sc);
   10545 
   10546 	return rv;
   10547 }
   10548 
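          /*
           * wm_gmii_i82544_writereg_locked:
           *
           *	Like wm_gmii_i82544_writereg(), but assumes the caller
           *	already holds the PHY semaphore.
           */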
   10549 static int
   10550 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10551 {
   10552 	struct wm_softc *sc = device_private(dev);
   10553 
   10554 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10555 		switch (sc->sc_phytype) {
   10556 		case WMPHY_IGP:
   10557 		case WMPHY_IGP_2:
   10558 		case WMPHY_IGP_3:
   10559 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10560 			    reg);
   10561 			break;
   10562 		default:
   10563 #ifdef WM_DEBUG
    10564 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x\n",
   10565 			    __func__, sc->sc_phytype, reg);
   10566 #endif
   10567 			break;
   10568 		}
   10569 	}
   10570 
   10571 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10572 
   10573 	return 0;
   10574 }
   10575 
   10576 /*
   10577  * wm_gmii_i80003_readreg:	[mii interface function]
   10578  *
    10579  *	Read a PHY register on the Kumeran interface (80003).
    10580  * This could be handled by the PHY layer if we didn't have to lock the
    10581  * resource ...
   10582  */
   10583 static int
   10584 wm_gmii_i80003_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10585 {
   10586 	struct wm_softc *sc = device_private(dev);
   10587 	int page_select;
   10588 	uint16_t temp, temp2;
   10589 	int rv = 0;
   10590 
   10591 	if (phy != 1) /* only one PHY on kumeran bus */
   10592 		return -1;
   10593 
   10594 	if (sc->phy.acquire(sc)) {
   10595 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10596 		return -1;
   10597 	}
   10598 
   10599 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10600 		page_select = GG82563_PHY_PAGE_SELECT;
   10601 	else {
   10602 		/*
   10603 		 * Use Alternative Page Select register to access registers
   10604 		 * 30 and 31.
   10605 		 */
   10606 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10607 	}
   10608 	temp = reg >> GG82563_PAGE_SHIFT;
   10609 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10610 		goto out;
   10611 
   10612 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10613 		/*
    10614 		 * Wait another 200us to work around a bug with the ready bit
    10615 		 * in the MDIC register.
   10616 		 */
   10617 		delay(200);
   10618 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10619 		if (temp2 != temp) {
   10620 			device_printf(dev, "%s failed\n", __func__);
   10621 			rv = -1;
   10622 			goto out;
   10623 		}
   10624 		delay(200);
   10625 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10626 		delay(200);
   10627 	} else
   10628 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10629 
   10630 out:
   10631 	sc->phy.release(sc);
   10632 	return rv;
   10633 }
   10634 
   10635 /*
   10636  * wm_gmii_i80003_writereg:	[mii interface function]
   10637  *
   10638  *	Write a PHY register on the kumeran.
    10639  *	Write a PHY register on the Kumeran interface (80003).
    10640  * This could be handled by the PHY layer if we didn't have to lock the
    10641  * resource ...
   10642 static int
   10643 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, uint16_t val)
   10644 {
   10645 	struct wm_softc *sc = device_private(dev);
   10646 	int page_select, rv;
   10647 	uint16_t temp, temp2;
   10648 
   10649 	if (phy != 1) /* only one PHY on kumeran bus */
   10650 		return -1;
   10651 
   10652 	if (sc->phy.acquire(sc)) {
   10653 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10654 		return -1;
   10655 	}
   10656 
   10657 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10658 		page_select = GG82563_PHY_PAGE_SELECT;
   10659 	else {
   10660 		/*
   10661 		 * Use Alternative Page Select register to access registers
   10662 		 * 30 and 31.
   10663 		 */
   10664 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10665 	}
   10666 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10667 	if ((rv = wm_gmii_mdic_writereg(dev, phy, page_select, temp)) != 0)
   10668 		goto out;
   10669 
   10670 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10671 		/*
    10672 		 * Wait another 200us to work around a bug with the ready bit
    10673 		 * in the MDIC register.
   10674 		 */
   10675 		delay(200);
   10676 		wm_gmii_mdic_readreg(dev, phy, page_select, &temp2);
   10677 		if (temp2 != temp) {
   10678 			device_printf(dev, "%s failed\n", __func__);
   10679 			rv = -1;
   10680 			goto out;
   10681 		}
   10682 		delay(200);
   10683 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10684 		delay(200);
   10685 	} else
   10686 		rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10687 
   10688 out:
   10689 	sc->phy.release(sc);
   10690 	return rv;
   10691 }
   10692 
   10693 /*
   10694  * wm_gmii_bm_readreg:	[mii interface function]
   10695  *
    10696  *	Read a PHY register on a BM PHY (82574/82583, and 82567 on ICH).
    10697  * This could be handled by the PHY layer if we didn't have to lock the
    10698  * resource ...
   10699  */
   10700 static int
   10701 wm_gmii_bm_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10702 {
   10703 	struct wm_softc *sc = device_private(dev);
   10704 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10705 	int rv;
   10706 
   10707 	if (sc->phy.acquire(sc)) {
   10708 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10709 		return -1;
   10710 	}
   10711 
   10712 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10713 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10714 		    || (reg == 31)) ? 1 : phy;
   10715 	/* Page 800 works differently than the rest so it has its own func */
   10716 	if (page == BM_WUC_PAGE) {
   10717 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10718 		goto release;
   10719 	}
   10720 
   10721 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10722 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10723 		    && (sc->sc_type != WM_T_82583))
   10724 			rv = wm_gmii_mdic_writereg(dev, phy,
   10725 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10726 		else
   10727 			rv = wm_gmii_mdic_writereg(dev, phy,
   10728 			    BME1000_PHY_PAGE_SELECT, page);
   10729 		if (rv != 0)
   10730 			goto release;
   10731 	}
   10732 
   10733 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK, val);
   10734 
   10735 release:
   10736 	sc->phy.release(sc);
   10737 	return rv;
   10738 }
   10739 
   10740 /*
   10741  * wm_gmii_bm_writereg:	[mii interface function]
   10742  *
    10743  *	Write a PHY register on a BM PHY (82574/82583, and 82567 on ICH).
    10744  * This could be handled by the PHY layer if we didn't have to lock the
    10745  * resource ...
   10746  */
   10747 static int
   10748 wm_gmii_bm_writereg(device_t dev, int phy, int reg, uint16_t val)
   10749 {
   10750 	struct wm_softc *sc = device_private(dev);
   10751 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10752 	int rv;
   10753 
   10754 	if (sc->phy.acquire(sc)) {
   10755 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10756 		return -1;
   10757 	}
   10758 
   10759 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10760 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10761 		    || (reg == 31)) ? 1 : phy;
   10762 	/* Page 800 works differently than the rest so it has its own func */
   10763 	if (page == BM_WUC_PAGE) {
   10764 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &val, false, false);
   10765 		goto release;
   10766 	}
   10767 
   10768 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10769 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10770 		    && (sc->sc_type != WM_T_82583))
   10771 			rv = wm_gmii_mdic_writereg(dev, phy,
   10772 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10773 		else
   10774 			rv = wm_gmii_mdic_writereg(dev, phy,
   10775 			    BME1000_PHY_PAGE_SELECT, page);
   10776 		if (rv != 0)
   10777 			goto release;
   10778 	}
   10779 
   10780 	rv = wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10781 
   10782 release:
   10783 	sc->phy.release(sc);
   10784 	return rv;
   10785 }
   10786 
   10787 /*
   10788  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10789  *  @dev: pointer to the HW structure
   10790  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10791  *
   10792  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10793  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10794  */
   10795 static int
   10796 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10797 {
   10798 	uint16_t temp;
   10799 	int rv;
   10800 
   10801 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10802 		device_xname(dev), __func__));
   10803 
   10804 	if (!phy_regp)
   10805 		return -1;
   10806 
   10807 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10808 
   10809 	/* Select Port Control Registers page */
   10810 	rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10811 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10812 	if (rv != 0)
   10813 		return rv;
   10814 
   10815 	/* Read WUCE and save it */
   10816 	rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG, phy_regp);
   10817 	if (rv != 0)
   10818 		return rv;
   10819 
   10820 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10821 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10822 	 */
   10823 	temp = *phy_regp;
   10824 	temp |= BM_WUC_ENABLE_BIT;
   10825 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10826 
   10827 	if ((rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp)) != 0)
   10828 		return rv;
   10829 
    10830 	/* Select the Host Wakeup Registers page - the caller is now able
    10831 	 * to write registers on the Wakeup registers page.
   10832 	 */
   10833 	return wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10834 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10835 }
   10836 
   10837 /*
   10838  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10839  *  @dev: pointer to the HW structure
   10840  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10841  *
   10842  *  Restore BM_WUC_ENABLE_REG to its original value.
   10843  *
   10844  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10845  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10846  *  caller.
   10847  */
   10848 static int
   10849 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10850 {
   10851 
   10852 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10853 		device_xname(dev), __func__));
   10854 
   10855 	if (!phy_regp)
   10856 		return -1;
   10857 
   10858 	/* Select Port Control Registers page */
   10859 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10860 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10861 
   10862 	/* Restore 769.17 to its original value */
   10863 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   10864 
   10865 	return 0;
   10866 }
   10867 
   10868 /*
   10869  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
    10870  *  @dev: pointer to the HW structure
   10871  *  @offset: register offset to be read or written
   10872  *  @val: pointer to the data to read or write
   10873  *  @rd: determines if operation is read or write
   10874  *  @page_set: BM_WUC_PAGE already set and access enabled
   10875  *
   10876  *  Read the PHY register at offset and store the retrieved information in
   10877  *  data, or write data to PHY register at offset.  Note the procedure to
   10878  *  access the PHY wakeup registers is different than reading the other PHY
   10879  *  registers. It works as such:
   10880  *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
   10881  *  2) Set page to 800 for host (801 if we were manageability)
   10882  *  3) Write the address using the address opcode (0x11)
   10883  *  4) Read or write the data using the data opcode (0x12)
   10884  *  5) Restore 769.17.2 to its original value
   10885  *
   10886  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   10887  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   10888  *
   10889  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   10890  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   10891  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
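           *
           *  For example, reading the wakeup control register (page 800,
           *  register 1) through this helper might look like the sketch
           *  below. The BM_PHY_REG() encode macro is an assumption here;
           *  any encoding that BM_PHY_REG_PAGE()/BM_PHY_REG_NUM() decode
           *  back to page 800, register 1 behaves the same:
           *
           *	uint16_t wuc;
           *	rv = wm_access_phy_wakeup_reg_bm(dev,
           *	    BM_PHY_REG(BM_WUC_PAGE, 1), &wuc, true, false);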
   10892  */
   10893 static int
   10894 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
   10895 	bool page_set)
   10896 {
   10897 	struct wm_softc *sc = device_private(dev);
   10898 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10899 	uint16_t page = BM_PHY_REG_PAGE(offset);
   10900 	uint16_t wuce;
   10901 	int rv = 0;
   10902 
   10903 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10904 		device_xname(dev), __func__));
   10905 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10906 	if ((sc->sc_type == WM_T_PCH)
   10907 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   10908 		device_printf(dev,
   10909 		    "Attempting to access page %d while gig enabled.\n", page);
   10910 	}
   10911 
   10912 	if (!page_set) {
   10913 		/* Enable access to PHY wakeup registers */
   10914 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   10915 		if (rv != 0) {
   10916 			device_printf(dev,
   10917 			    "%s: Could not enable PHY wakeup reg access\n",
   10918 			    __func__);
   10919 			return rv;
   10920 		}
   10921 	}
   10922 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   10923 		device_xname(sc->sc_dev), __func__, page, regnum));
   10924 
   10925 	/*
    10926 	 * Access the PHY wakeup register: steps 3) and 4) of the
    10927 	 * procedure described in the comment above this function.
   10928 	 */
   10929 
   10930 	/* Write the Wakeup register page offset value using opcode 0x11 */
   10931 	rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10932 	if (rv != 0)
   10933 		return rv;
   10934 
   10935 	if (rd) {
   10936 		/* Read the Wakeup register page value using opcode 0x12 */
   10937 		rv = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE, val);
   10938 	} else {
   10939 		/* Write the Wakeup register page value using opcode 0x12 */
   10940 		rv = wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10941 	}
   10942 	if (rv != 0)
   10943 		return rv;
   10944 
   10945 	if (!page_set)
   10946 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   10947 
   10948 	return rv;
   10949 }
   10950 
   10951 /*
   10952  * wm_gmii_hv_readreg:	[mii interface function]
   10953  *
    10954  *	Read a PHY register on an HV (PCH and newer) PHY.
    10955  * This could be handled by the PHY layer if we didn't have to lock the
    10956  * resource ...
   10957  */
   10958 static int
   10959 wm_gmii_hv_readreg(device_t dev, int phy, int reg, uint16_t *val)
   10960 {
   10961 	struct wm_softc *sc = device_private(dev);
   10962 	int rv;
   10963 
   10964 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10965 		device_xname(dev), __func__));
   10966 	if (sc->phy.acquire(sc)) {
   10967 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10968 		return -1;
   10969 	}
   10970 
   10971 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg, val);
   10972 	sc->phy.release(sc);
   10973 	return rv;
   10974 }
   10975 
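          /*
           * wm_gmii_hv_readreg_locked:
           *
           *	Like wm_gmii_hv_readreg(), but assumes the caller already
           *	holds the PHY semaphore. Decodes the BM_PHY_REG_* encoded
           *	page/register, and redirects page 800 accesses to
           *	wm_access_phy_wakeup_reg_bm().
           */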
   10976 static int
   10977 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10978 {
   10979 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10980 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10981 	int rv;
   10982 
   10983 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10984 
   10985 	/* Page 800 works differently than the rest so it has its own func */
   10986 	if (page == BM_WUC_PAGE)
   10987 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10988 
   10989 	/*
    10990 	 * Pages lower than 768 work differently than the rest, so they
    10991 	 * would need their own function (not implemented).
   10992 	 */
   10993 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10994 		printf("gmii_hv_readreg!!!\n");
   10995 		return -1;
   10996 	}
   10997 
   10998 	/*
   10999 	 * XXX I21[789] documents say that the SMBus Address register is at
   11000 	 * PHY address 01, Page 0 (not 768), Register 26.
   11001 	 */
   11002 	if (page == HV_INTC_FC_PAGE_START)
   11003 		page = 0;
   11004 
   11005 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11006 		rv = wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11007 		    page << BME1000_PAGE_SHIFT);
   11008 		if (rv != 0)
   11009 			return rv;
   11010 	}
   11011 
   11012 	return wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK, val);
   11013 }
   11014 
   11015 /*
   11016  * wm_gmii_hv_writereg:	[mii interface function]
   11017  *
    11018  *	Write a PHY register on an HV (PCH and newer) PHY.
    11019  * This could be handled by the PHY layer if we didn't have to lock the
    11020  * resource ...
   11021  */
   11022 static int
   11023 wm_gmii_hv_writereg(device_t dev, int phy, int reg, uint16_t val)
   11024 {
   11025 	struct wm_softc *sc = device_private(dev);
   11026 	int rv;
   11027 
   11028 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   11029 		device_xname(dev), __func__));
   11030 
   11031 	if (sc->phy.acquire(sc)) {
   11032 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11033 		return -1;
   11034 	}
   11035 
   11036 	rv = wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11037 	sc->phy.release(sc);
   11038 
   11039 	return rv;
   11040 }
   11041 
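          /*
           * wm_gmii_hv_writereg_locked:
           *
           *	Like wm_gmii_hv_writereg(), but assumes the caller already
           *	holds the PHY semaphore.
           */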
   11042 static int
   11043 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11044 {
   11045 	struct wm_softc *sc = device_private(dev);
   11046 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11047 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11048 	int rv;
   11049 
   11050 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11051 
   11052 	/* Page 800 works differently than the rest so it has its own func */
   11053 	if (page == BM_WUC_PAGE)
   11054 		return wm_access_phy_wakeup_reg_bm(dev, reg, &val, false,
   11055 		    false);
   11056 
   11057 	/*
    11058 	 * Pages lower than 768 work differently than the rest, so they
    11059 	 * would need their own function (not implemented).
   11060 	 */
   11061 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11062 		printf("gmii_hv_writereg!!!\n");
   11063 		return -1;
   11064 	}
   11065 
   11066 	{
   11067 		/*
   11068 		 * XXX I21[789] documents say that the SMBus Address register
   11069 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11070 		 */
   11071 		if (page == HV_INTC_FC_PAGE_START)
   11072 			page = 0;
   11073 
   11074 		/*
   11075 		 * XXX Workaround MDIO accesses being disabled after entering
   11076 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11077 		 * register is set)
   11078 		 */
   11079 		if (sc->sc_phytype == WMPHY_82578) {
   11080 			struct mii_softc *child;
   11081 
   11082 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11083 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11084 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11085 			    && ((val & (1 << 11)) != 0)) {
   11086 				printf("XXX need workaround\n");
   11087 			}
   11088 		}
   11089 
   11090 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11091 			rv = wm_gmii_mdic_writereg(dev, 1,
   11092 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   11093 			if (rv != 0)
   11094 				return rv;
   11095 		}
   11096 	}
   11097 
   11098 	return wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11099 }
   11100 
   11101 /*
   11102  * wm_gmii_82580_readreg:	[mii interface function]
   11103  *
    11104  *	Read a PHY register on the 82580, I350 and I354.
    11105  * This could be handled by the PHY layer if we didn't have to lock the
    11106  * resource ...
   11107  */
   11108 static int
   11109 wm_gmii_82580_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11110 {
   11111 	struct wm_softc *sc = device_private(dev);
   11112 	int rv;
   11113 
   11114 	if (sc->phy.acquire(sc) != 0) {
   11115 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11116 		return -1;
   11117 	}
   11118 
   11119 #ifdef DIAGNOSTIC
   11120 	if (reg > MII_ADDRMASK) {
   11121 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11122 		    __func__, sc->sc_phytype, reg);
   11123 		reg &= MII_ADDRMASK;
   11124 	}
   11125 #endif
   11126 	rv = wm_gmii_mdic_readreg(dev, phy, reg, val);
   11127 
   11128 	sc->phy.release(sc);
   11129 	return rv;
   11130 }
   11131 
   11132 /*
   11133  * wm_gmii_82580_writereg:	[mii interface function]
   11134  *
    11135  *	Write a PHY register on the 82580, I350 and I354.
    11136  * This could be handled by the PHY layer if we didn't have to lock the
    11137  * resource ...
   11138  */
   11139 static int
   11140 wm_gmii_82580_writereg(device_t dev, int phy, int reg, uint16_t val)
   11141 {
   11142 	struct wm_softc *sc = device_private(dev);
   11143 	int rv;
   11144 
   11145 	if (sc->phy.acquire(sc) != 0) {
   11146 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11147 		return -1;
   11148 	}
   11149 
   11150 #ifdef DIAGNOSTIC
   11151 	if (reg > MII_ADDRMASK) {
   11152 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11153 		    __func__, sc->sc_phytype, reg);
   11154 		reg &= MII_ADDRMASK;
   11155 	}
   11156 #endif
   11157 	rv = wm_gmii_mdic_writereg(dev, phy, reg, val);
   11158 
   11159 	sc->phy.release(sc);
   11160 	return rv;
   11161 }
   11162 
   11163 /*
   11164  * wm_gmii_gs40g_readreg:	[mii interface function]
   11165  *
    11166  *	Read a PHY register on the I210 and I211.
    11167  * This could be handled by the PHY layer if we didn't have to lock the
    11168  * resource ...
   11169  */
   11170 static int
   11171 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11172 {
   11173 	struct wm_softc *sc = device_private(dev);
   11174 	int page, offset;
   11175 	int rv;
   11176 
   11177 	/* Acquire semaphore */
   11178 	if (sc->phy.acquire(sc)) {
   11179 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11180 		return -1;
   11181 	}
   11182 
   11183 	/* Page select */
   11184 	page = reg >> GS40G_PAGE_SHIFT;
   11185 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11186 	if (rv != 0)
   11187 		goto release;
   11188 
   11189 	/* Read reg */
   11190 	offset = reg & GS40G_OFFSET_MASK;
   11191 	rv = wm_gmii_mdic_readreg(dev, phy, offset, val);
   11192 
   11193 release:
   11194 	sc->phy.release(sc);
   11195 	return rv;
   11196 }
   11197 
   11198 /*
   11199  * wm_gmii_gs40g_writereg:	[mii interface function]
   11200  *
   11201  *	Write a PHY register on the I210 and I211.
   11202  * This could be handled by the PHY layer if we didn't have to lock the
   11203  * resource ...
   11204  */
   11205 static int
   11206 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, uint16_t val)
   11207 {
   11208 	struct wm_softc *sc = device_private(dev);
   11209 	uint16_t page;
   11210 	int offset, rv;
   11211 
   11212 	/* Acquire semaphore */
   11213 	if (sc->phy.acquire(sc)) {
   11214 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11215 		return -1;
   11216 	}
   11217 
   11218 	/* Page select */
   11219 	page = reg >> GS40G_PAGE_SHIFT;
   11220 	rv = wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11221 	if (rv != 0)
   11222 		goto release;
   11223 
   11224 	/* Write reg */
   11225 	offset = reg & GS40G_OFFSET_MASK;
   11226 	rv = wm_gmii_mdic_writereg(dev, phy, offset, val);
   11227 
   11228 release:
   11229 	/* Release semaphore */
   11230 	sc->phy.release(sc);
   11231 	return rv;
   11232 }
   11233 
   11234 /*
   11235  * wm_gmii_statchg:	[mii interface function]
   11236  *
   11237  *	Callback from MII layer when media changes.
   11238  */
   11239 static void
   11240 wm_gmii_statchg(struct ifnet *ifp)
   11241 {
   11242 	struct wm_softc *sc = ifp->if_softc;
   11243 	struct mii_data *mii = &sc->sc_mii;
   11244 
   11245 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11246 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11247 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11248 
   11249 	/*
   11250 	 * Get flow control negotiation result.
   11251 	 */
   11252 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11253 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11254 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11255 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11256 	}
   11257 
   11258 	if (sc->sc_flowflags & IFM_FLOW) {
   11259 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11260 			sc->sc_ctrl |= CTRL_TFCE;
   11261 			sc->sc_fcrtl |= FCRTL_XONE;
   11262 		}
   11263 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11264 			sc->sc_ctrl |= CTRL_RFCE;
   11265 	}
   11266 
   11267 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11268 		DPRINTF(WM_DEBUG_LINK,
   11269 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11270 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11271 	} else {
   11272 		DPRINTF(WM_DEBUG_LINK,
   11273 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11274 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11275 	}
   11276 
   11277 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11278 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11279 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11280 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11281 	if (sc->sc_type == WM_T_80003) {
   11282 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11283 		case IFM_1000_T:
   11284 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11285 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11286 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   11287 			break;
   11288 		default:
   11289 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11290 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11291 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   11292 			break;
   11293 		}
   11294 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11295 	}
   11296 }
   11297 
   11298 /* kumeran related (80003, ICH* and PCH*) */
   11299 
   11300 /*
   11301  * wm_kmrn_readreg:
   11302  *
   11303  *	Read a kumeran register
   11304  */
   11305 static int
   11306 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11307 {
   11308 	int rv;
   11309 
   11310 	if (sc->sc_type == WM_T_80003)
   11311 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11312 	else
   11313 		rv = sc->phy.acquire(sc);
   11314 	if (rv != 0) {
   11315 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11316 		    __func__);
   11317 		return rv;
   11318 	}
   11319 
   11320 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11321 
   11322 	if (sc->sc_type == WM_T_80003)
   11323 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11324 	else
   11325 		sc->phy.release(sc);
   11326 
   11327 	return rv;
   11328 }
   11329 
   11330 static int
   11331 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11332 {
   11333 
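         	/*
         	 * Kumeran registers are reached indirectly through
         	 * KUMCTRLSTA: write the register offset with the read enable
         	 * bit (REN) set, wait briefly, then read the data back from
         	 * the same register.
         	 */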
   11334 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11335 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11336 	    KUMCTRLSTA_REN);
   11337 	CSR_WRITE_FLUSH(sc);
   11338 	delay(2);
   11339 
   11340 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11341 
   11342 	return 0;
   11343 }
   11344 
   11345 /*
   11346  * wm_kmrn_writereg:
   11347  *
   11348  *	Write a kumeran register
   11349  */
   11350 static int
   11351 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11352 {
   11353 	int rv;
   11354 
   11355 	if (sc->sc_type == WM_T_80003)
   11356 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11357 	else
   11358 		rv = sc->phy.acquire(sc);
   11359 	if (rv != 0) {
   11360 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11361 		    __func__);
   11362 		return rv;
   11363 	}
   11364 
   11365 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11366 
   11367 	if (sc->sc_type == WM_T_80003)
   11368 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11369 	else
   11370 		sc->phy.release(sc);
   11371 
   11372 	return rv;
   11373 }
   11374 
   11375 static int
   11376 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11377 {
   11378 
   11379 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11380 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11381 
   11382 	return 0;
   11383 }
   11384 
   11385 /*
   11386  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11387  * This access method is different from IEEE MMD.
   11388  */
   11389 static int
   11390 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11391 {
   11392 	struct wm_softc *sc = device_private(dev);
   11393 	int rv;
   11394 
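         	/*
         	 * EMI registers are accessed indirectly: write the EMI
         	 * register address to I82579_EMI_ADDR, then move the data
         	 * through I82579_EMI_DATA.
         	 */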
   11395 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11396 	if (rv != 0)
   11397 		return rv;
   11398 
   11399 	if (rd)
   11400 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11401 	else
   11402 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11403 	return rv;
   11404 }
   11405 
   11406 static int
   11407 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11408 {
   11409 
   11410 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11411 }
   11412 
   11413 static int
   11414 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11415 {
   11416 
   11417 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11418 }
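         
         /*
          * Usage sketch (illustrative only): the EMI helpers assume the
          * caller already holds the PHY resource, e.g.:
          *
          *	if (sc->phy.acquire(sc) == 0) {
          *		rv = wm_read_emi_reg_locked(dev, reg, &val);
          *		sc->phy.release(sc);
          *	}
          */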
   11419 
   11420 /* SGMII related */
   11421 
   11422 /*
   11423  * wm_sgmii_uses_mdio
   11424  *
   11425  * Check whether the transaction is to the internal PHY or the external
   11426  * MDIO interface. Return true if it's MDIO.
   11427  */
   11428 static bool
   11429 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11430 {
   11431 	uint32_t reg;
   11432 	bool ismdio = false;
   11433 
   11434 	switch (sc->sc_type) {
   11435 	case WM_T_82575:
   11436 	case WM_T_82576:
   11437 		reg = CSR_READ(sc, WMREG_MDIC);
   11438 		ismdio = ((reg & MDIC_DEST) != 0);
   11439 		break;
   11440 	case WM_T_82580:
   11441 	case WM_T_I350:
   11442 	case WM_T_I354:
   11443 	case WM_T_I210:
   11444 	case WM_T_I211:
   11445 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11446 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11447 		break;
   11448 	default:
   11449 		break;
   11450 	}
   11451 
   11452 	return ismdio;
   11453 }
   11454 
   11455 /*
   11456  * wm_sgmii_readreg:	[mii interface function]
   11457  *
   11458  *	Read a PHY register on the SGMII
   11459  * This could be handled by the PHY layer if we didn't have to lock the
   11460  * resource ...
   11461  */
   11462 static int
   11463 wm_sgmii_readreg(device_t dev, int phy, int reg, uint16_t *val)
   11464 {
   11465 	struct wm_softc *sc = device_private(dev);
   11466 	int rv;
   11467 
   11468 	if (sc->phy.acquire(sc)) {
   11469 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11470 		return -1;
   11471 	}
   11472 
   11473 	rv = wm_sgmii_readreg_locked(dev, phy, reg, val);
   11474 
   11475 	sc->phy.release(sc);
   11476 	return rv;
   11477 }
   11478 
   11479 static int
   11480 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11481 {
   11482 	struct wm_softc *sc = device_private(dev);
   11483 	uint32_t i2ccmd;
   11484 	int i, rv = 0;
   11485 
   11486 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11487 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11488 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11489 
   11490 	/* Poll the ready bit */
   11491 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11492 		delay(50);
   11493 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11494 		if (i2ccmd & I2CCMD_READY)
   11495 			break;
   11496 	}
   11497 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11498 		device_printf(dev, "I2CCMD Read did not complete\n");
   11499 		rv = ETIMEDOUT;
   11500 	}
   11501 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11502 		device_printf(dev, "I2CCMD Error bit set\n");
   11503 		rv = EIO;
   11504 	}
   11505 
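         	/* Swap the data bytes back from the I2C interface */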
   11506 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11507 
   11508 	return rv;
   11509 }
   11510 
   11511 /*
   11512  * wm_sgmii_writereg:	[mii interface function]
   11513  *
   11514  *	Write a PHY register on the SGMII.
   11515  * This could be handled by the PHY layer if we didn't have to lock the
   11516  * resource ...
   11517  */
   11518 static int
   11519 wm_sgmii_writereg(device_t dev, int phy, int reg, uint16_t val)
   11520 {
   11521 	struct wm_softc *sc = device_private(dev);
   11522 	int rv;
   11523 
   11524 	if (sc->phy.acquire(sc) != 0) {
   11525 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11526 		return -1;
   11527 	}
   11528 
   11529 	rv = wm_sgmii_writereg_locked(dev, phy, reg, val);
   11530 
   11531 	sc->phy.release(sc);
   11532 
   11533 	return rv;
   11534 }
   11535 
   11536 static int
   11537 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11538 {
   11539 	struct wm_softc *sc = device_private(dev);
   11540 	uint32_t i2ccmd;
   11541 	uint16_t swapdata;
   11542 	int rv = 0;
   11543 	int i;
   11544 
   11545 	/* Swap the data bytes for the I2C interface */
   11546 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11547 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11548 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11549 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11550 
   11551 	/* Poll the ready bit */
   11552 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11553 		delay(50);
   11554 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11555 		if (i2ccmd & I2CCMD_READY)
   11556 			break;
   11557 	}
   11558 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11559 		device_printf(dev, "I2CCMD Write did not complete\n");
   11560 		rv = ETIMEDOUT;
   11561 	}
   11562 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11563 		device_printf(dev, "I2CCMD Error bit set\n");
   11564 		rv = EIO;
   11565 	}
   11566 
   11567 	return rv;
   11568 }
   11569 
   11570 /* TBI related */
   11571 
   11572 static bool
   11573 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11574 {
   11575 	bool sig;
   11576 
   11577 	sig = ctrl & CTRL_SWDPIN(1);
   11578 
   11579 	/*
   11580 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11581 	 * detect a signal, 1 if they don't.
   11582 	 */
   11583 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11584 		sig = !sig;
   11585 
   11586 	return sig;
   11587 }
   11588 
   11589 /*
   11590  * wm_tbi_mediainit:
   11591  *
   11592  *	Initialize media for use on 1000BASE-X devices.
   11593  */
   11594 static void
   11595 wm_tbi_mediainit(struct wm_softc *sc)
   11596 {
   11597 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11598 	const char *sep = "";
   11599 
   11600 	if (sc->sc_type < WM_T_82543)
   11601 		sc->sc_tipg = TIPG_WM_DFLT;
   11602 	else
   11603 		sc->sc_tipg = TIPG_LG_DFLT;
   11604 
   11605 	sc->sc_tbi_serdes_anegticks = 5;
   11606 
   11607 	/* Initialize our media structures */
   11608 	sc->sc_mii.mii_ifp = ifp;
   11609 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11610 
   11611 	if (((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211))
   11612 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11613 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11614 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11615 	else
   11616 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11617 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11618 
   11619 	/*
   11620 	 * SWD Pins:
   11621 	 *
   11622 	 *	0 = Link LED (output)
   11623 	 *	1 = Loss Of Signal (input)
   11624 	 */
   11625 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11626 
   11627 	/* XXX Perhaps this is only for TBI */
   11628 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11629 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11630 
   11631 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11632 		sc->sc_ctrl &= ~CTRL_LRST;
   11633 
   11634 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11635 
   11636 #define	ADD(ss, mm, dd)							\
   11637 do {									\
   11638 	aprint_normal("%s%s", sep, ss);					\
   11639 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11640 	sep = ", ";							\
   11641 } while (/*CONSTCOND*/0)
   11642 
   11643 	aprint_normal_dev(sc->sc_dev, "");
   11644 
   11645 	if (sc->sc_type == WM_T_I354) {
   11646 		uint32_t status;
   11647 
   11648 		status = CSR_READ(sc, WMREG_STATUS);
   11649 		if (((status & STATUS_2P5_SKU) != 0)
   11650 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11651 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11652 		} else
   11653 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11654 	} else if (sc->sc_type == WM_T_82545) {
   11655 		/* Only 82545 is LX (XXX except SFP) */
   11656 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11657 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11658 	} else {
   11659 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11660 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11661 	}
   11662 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11663 	aprint_normal("\n");
   11664 
   11665 #undef ADD
   11666 
   11667 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11668 }
   11669 
   11670 /*
   11671  * wm_tbi_mediachange:	[ifmedia interface function]
   11672  *
   11673  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11674  */
   11675 static int
   11676 wm_tbi_mediachange(struct ifnet *ifp)
   11677 {
   11678 	struct wm_softc *sc = ifp->if_softc;
   11679 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11680 	uint32_t status, ctrl;
   11681 	bool signal;
   11682 	int i;
   11683 
   11684 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11685 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11686 		/* XXX need some work for >= 82571 and < 82575 */
   11687 		if (sc->sc_type < WM_T_82575)
   11688 			return 0;
   11689 	}
   11690 
   11691 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11692 	    || (sc->sc_type >= WM_T_82575))
   11693 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11694 
   11695 	sc->sc_ctrl &= ~CTRL_LRST;
   11696 	sc->sc_txcw = TXCW_ANE;
   11697 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11698 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11699 	else if (ife->ifm_media & IFM_FDX)
   11700 		sc->sc_txcw |= TXCW_FD;
   11701 	else
   11702 		sc->sc_txcw |= TXCW_HD;
   11703 
   11704 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11705 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11706 
   11707 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11708 		device_xname(sc->sc_dev), sc->sc_txcw));
   11709 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11710 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11711 	CSR_WRITE_FLUSH(sc);
   11712 	delay(1000);
   11713 
   11714 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11715 	signal = wm_tbi_havesignal(sc, ctrl);
   11716 
   11717 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11718 		signal));
   11719 
   11720 	if (signal) {
   11721 		/* Have signal; wait for the link to come up. */
   11722 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11723 			delay(10000);
   11724 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11725 				break;
   11726 		}
   11727 
   11728 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
   11729 			device_xname(sc->sc_dev), i));
   11730 
   11731 		status = CSR_READ(sc, WMREG_STATUS);
   11732 		DPRINTF(WM_DEBUG_LINK,
   11733 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   11734 			device_xname(sc->sc_dev), status, STATUS_LU));
   11735 		if (status & STATUS_LU) {
   11736 			/* Link is up. */
   11737 			DPRINTF(WM_DEBUG_LINK,
   11738 			    ("%s: LINK: set media -> link up %s\n",
   11739 				device_xname(sc->sc_dev),
   11740 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11741 
   11742 			/*
   11743 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   11744 			 * so we should update sc->sc_ctrl
   11745 			 */
   11746 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11747 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11748 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11749 			if (status & STATUS_FD)
   11750 				sc->sc_tctl |=
   11751 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11752 			else
   11753 				sc->sc_tctl |=
   11754 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11755 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11756 				sc->sc_fcrtl |= FCRTL_XONE;
   11757 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11758 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11759 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11760 			sc->sc_tbi_linkup = 1;
   11761 		} else {
   11762 			if (i == WM_LINKUP_TIMEOUT)
   11763 				wm_check_for_link(sc);
   11764 			/* Link is down. */
   11765 			DPRINTF(WM_DEBUG_LINK,
   11766 			    ("%s: LINK: set media -> link down\n",
   11767 				device_xname(sc->sc_dev)));
   11768 			sc->sc_tbi_linkup = 0;
   11769 		}
   11770 	} else {
   11771 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11772 			device_xname(sc->sc_dev)));
   11773 		sc->sc_tbi_linkup = 0;
   11774 	}
   11775 
   11776 	wm_tbi_serdes_set_linkled(sc);
   11777 
   11778 	return 0;
   11779 }
   11780 
   11781 /*
   11782  * wm_tbi_mediastatus:	[ifmedia interface function]
   11783  *
   11784  *	Get the current interface media status on a 1000BASE-X device.
   11785  */
   11786 static void
   11787 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11788 {
   11789 	struct wm_softc *sc = ifp->if_softc;
   11790 	uint32_t ctrl, status;
   11791 
   11792 	ifmr->ifm_status = IFM_AVALID;
   11793 	ifmr->ifm_active = IFM_ETHER;
   11794 
   11795 	status = CSR_READ(sc, WMREG_STATUS);
   11796 	if ((status & STATUS_LU) == 0) {
   11797 		ifmr->ifm_active |= IFM_NONE;
   11798 		return;
   11799 	}
   11800 
   11801 	ifmr->ifm_status |= IFM_ACTIVE;
   11802 	/* Only 82545 is LX */
   11803 	if (sc->sc_type == WM_T_82545)
   11804 		ifmr->ifm_active |= IFM_1000_LX;
   11805 	else
   11806 		ifmr->ifm_active |= IFM_1000_SX;
   11807 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11808 		ifmr->ifm_active |= IFM_FDX;
   11809 	else
   11810 		ifmr->ifm_active |= IFM_HDX;
   11811 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11812 	if (ctrl & CTRL_RFCE)
   11813 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11814 	if (ctrl & CTRL_TFCE)
   11815 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11816 }
   11817 
   11818 /* XXX TBI only */
   11819 static int
   11820 wm_check_for_link(struct wm_softc *sc)
   11821 {
   11822 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11823 	uint32_t rxcw;
   11824 	uint32_t ctrl;
   11825 	uint32_t status;
   11826 	bool signal;
   11827 
   11828 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11829 		device_xname(sc->sc_dev), __func__));
   11830 
   11831 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11832 		/* XXX need some work for >= 82571 */
   11833 		if (sc->sc_type >= WM_T_82571) {
   11834 			sc->sc_tbi_linkup = 1;
   11835 			return 0;
   11836 		}
   11837 	}
   11838 
   11839 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11840 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11841 	status = CSR_READ(sc, WMREG_STATUS);
   11842 	signal = wm_tbi_havesignal(sc, ctrl);
   11843 
   11844 	DPRINTF(WM_DEBUG_LINK,
   11845 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11846 		device_xname(sc->sc_dev), __func__, signal,
   11847 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11848 
   11849 	/*
   11850 	 * SWDPIN   LU RXCW
   11851 	 *	0    0	  0
   11852 	 *	0    0	  1	(should not happen)
   11853 	 *	0    1	  0	(should not happen)
   11854 	 *	0    1	  1	(should not happen)
   11855 	 *	1    0	  0	Disable autonego and force linkup
   11856 	 *	1    0	  1	got /C/ but not linkup yet
   11857 	 *	1    1	  0	(linkup)
   11858 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11859 	 *
   11860 	 */
   11861 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11862 		DPRINTF(WM_DEBUG_LINK,
   11863 		    ("%s: %s: force linkup and fullduplex\n",
   11864 			device_xname(sc->sc_dev), __func__));
   11865 		sc->sc_tbi_linkup = 0;
   11866 		/* Disable auto-negotiation in the TXCW register */
   11867 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11868 
   11869 		/*
   11870 		 * Force link-up and also force full-duplex.
   11871 		 *
   11872 		 * NOTE: the hardware updated CTRL's TFCE and RFCE bits
   11873 		 * automatically, so base sc->sc_ctrl on the value just read.
   11874 		 */
   11875 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11876 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11877 	} else if (((status & STATUS_LU) != 0)
   11878 	    && ((rxcw & RXCW_C) != 0)
   11879 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11880 		sc->sc_tbi_linkup = 1;
   11881 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11882 			device_xname(sc->sc_dev),
   11883 			__func__));
   11884 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11885 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11886 	} else if (signal && ((rxcw & RXCW_C) != 0))
   11887 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   11888 			device_xname(sc->sc_dev), __func__));
   11889 	else
   11890 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11891 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11892 			status));
   11893 
   11894 	return 0;
   11895 }
   11896 
   11897 /*
   11898  * wm_tbi_tick:
   11899  *
   11900  *	Check the link on TBI devices.
   11901  *	This function acts as mii_tick().
   11902  */
   11903 static void
   11904 wm_tbi_tick(struct wm_softc *sc)
   11905 {
   11906 	struct mii_data *mii = &sc->sc_mii;
   11907 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11908 	uint32_t status;
   11909 
   11910 	KASSERT(WM_CORE_LOCKED(sc));
   11911 
   11912 	status = CSR_READ(sc, WMREG_STATUS);
   11913 
   11914 	/* XXX is this needed? */
   11915 	(void)CSR_READ(sc, WMREG_RXCW);
   11916 	(void)CSR_READ(sc, WMREG_CTRL);
   11917 
   11918 	/* set link status */
   11919 	if ((status & STATUS_LU) == 0) {
   11920 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11921 			device_xname(sc->sc_dev)));
   11922 		sc->sc_tbi_linkup = 0;
   11923 	} else if (sc->sc_tbi_linkup == 0) {
   11924 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11925 			device_xname(sc->sc_dev),
   11926 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11927 		sc->sc_tbi_linkup = 1;
   11928 		sc->sc_tbi_serdes_ticks = 0;
   11929 	}
   11930 
   11931 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11932 		goto setled;
   11933 
   11934 	if ((status & STATUS_LU) == 0) {
   11935 		sc->sc_tbi_linkup = 0;
   11936 		/* If the timer expired, retry autonegotiation */
   11937 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11938 		    && (++sc->sc_tbi_serdes_ticks
   11939 			>= sc->sc_tbi_serdes_anegticks)) {
   11940 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11941 			sc->sc_tbi_serdes_ticks = 0;
   11942 			/*
   11943 			 * Reset the link, and let autonegotiation do
   11944 			 * its thing
   11945 			 */
   11946 			sc->sc_ctrl |= CTRL_LRST;
   11947 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11948 			CSR_WRITE_FLUSH(sc);
   11949 			delay(1000);
   11950 			sc->sc_ctrl &= ~CTRL_LRST;
   11951 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11952 			CSR_WRITE_FLUSH(sc);
   11953 			delay(1000);
   11954 			CSR_WRITE(sc, WMREG_TXCW,
   11955 			    sc->sc_txcw & ~TXCW_ANE);
   11956 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11957 		}
   11958 	}
   11959 
   11960 setled:
   11961 	wm_tbi_serdes_set_linkled(sc);
   11962 }
   11963 
   11964 /* SERDES related */
   11965 static void
   11966 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11967 {
   11968 	uint32_t reg;
   11969 
   11970 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11971 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11972 		return;
   11973 
   11974 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11975 	reg |= PCS_CFG_PCS_EN;
   11976 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11977 
   11978 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11979 	reg &= ~CTRL_EXT_SWDPIN(3);
   11980 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11981 	CSR_WRITE_FLUSH(sc);
   11982 }
   11983 
   11984 static int
   11985 wm_serdes_mediachange(struct ifnet *ifp)
   11986 {
   11987 	struct wm_softc *sc = ifp->if_softc;
   11988 	bool pcs_autoneg = true; /* XXX */
   11989 	uint32_t ctrl_ext, pcs_lctl, reg;
   11990 
   11991 	/* XXX Currently, this function is not called on 8257[12] */
   11992 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11993 	    || (sc->sc_type >= WM_T_82575))
   11994 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11995 
   11996 	wm_serdes_power_up_link_82575(sc);
   11997 
   11998 	sc->sc_ctrl |= CTRL_SLU;
   11999 
   12000 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   12001 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   12002 
   12003 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12004 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   12005 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   12006 	case CTRL_EXT_LINK_MODE_SGMII:
   12007 		pcs_autoneg = true;
   12008 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   12009 		break;
   12010 	case CTRL_EXT_LINK_MODE_1000KX:
   12011 		pcs_autoneg = false;
   12012 		/* FALLTHROUGH */
   12013 	default:
   12014 		if ((sc->sc_type == WM_T_82575)
   12015 		    || (sc->sc_type == WM_T_82576)) {
   12016 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   12017 				pcs_autoneg = false;
   12018 		}
   12019 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   12020 		    | CTRL_FRCFDX;
   12021 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   12022 	}
   12023 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   12024 
   12025 	if (pcs_autoneg) {
   12026 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   12027 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   12028 
   12029 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   12030 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   12031 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   12032 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   12033 	} else
   12034 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   12035 
   12036 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   12037 
   12039 	return 0;
   12040 }
   12041 
   12042 static void
   12043 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   12044 {
   12045 	struct wm_softc *sc = ifp->if_softc;
   12046 	struct mii_data *mii = &sc->sc_mii;
   12047 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12048 	uint32_t pcs_adv, pcs_lpab, reg;
   12049 
   12050 	ifmr->ifm_status = IFM_AVALID;
   12051 	ifmr->ifm_active = IFM_ETHER;
   12052 
   12053 	/* Check PCS */
   12054 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12055 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12056 		ifmr->ifm_active |= IFM_NONE;
   12057 		sc->sc_tbi_linkup = 0;
   12058 		goto setled;
   12059 	}
   12060 
   12061 	sc->sc_tbi_linkup = 1;
   12062 	ifmr->ifm_status |= IFM_ACTIVE;
   12063 	if (sc->sc_type == WM_T_I354) {
   12064 		uint32_t status;
   12065 
   12066 		status = CSR_READ(sc, WMREG_STATUS);
   12067 		if (((status & STATUS_2P5_SKU) != 0)
   12068 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12069 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   12070 		} else
   12071 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   12072 	} else {
   12073 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12074 		case PCS_LSTS_SPEED_10:
   12075 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12076 			break;
   12077 		case PCS_LSTS_SPEED_100:
   12078 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12079 			break;
   12080 		case PCS_LSTS_SPEED_1000:
   12081 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12082 			break;
   12083 		default:
   12084 			device_printf(sc->sc_dev, "Unknown speed\n");
   12085 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12086 			break;
   12087 		}
   12088 	}
   12089 	if ((reg & PCS_LSTS_FDX) != 0)
   12090 		ifmr->ifm_active |= IFM_FDX;
   12091 	else
   12092 		ifmr->ifm_active |= IFM_HDX;
   12093 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12094 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12095 		/* Check flow */
   12096 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12097 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12098 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12099 			goto setled;
   12100 		}
   12101 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12102 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12103 		DPRINTF(WM_DEBUG_LINK,
   12104 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
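         		/*
         		 * Resolve the pause bits as in IEEE 802.3 Annex 28B:
         		 * symmetric flow control when both sides advertise
         		 * symmetric pause, TX-only or RX-only when the
         		 * asymmetric bits allow it.
         		 */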
   12105 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12106 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12107 			mii->mii_media_active |= IFM_FLOW
   12108 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12109 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12110 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12111 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12112 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12113 			mii->mii_media_active |= IFM_FLOW
   12114 			    | IFM_ETH_TXPAUSE;
   12115 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12116 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12117 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12118 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12119 			mii->mii_media_active |= IFM_FLOW
   12120 			    | IFM_ETH_RXPAUSE;
   12121 		}
   12122 	}
   12123 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12124 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12125 setled:
   12126 	wm_tbi_serdes_set_linkled(sc);
   12127 }
   12128 
   12129 /*
   12130  * wm_serdes_tick:
   12131  *
   12132  *	Check the link on serdes devices.
   12133  */
   12134 static void
   12135 wm_serdes_tick(struct wm_softc *sc)
   12136 {
   12137 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12138 	struct mii_data *mii = &sc->sc_mii;
   12139 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12140 	uint32_t reg;
   12141 
   12142 	KASSERT(WM_CORE_LOCKED(sc));
   12143 
   12144 	mii->mii_media_status = IFM_AVALID;
   12145 	mii->mii_media_active = IFM_ETHER;
   12146 
   12147 	/* Check PCS */
   12148 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12149 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12150 		mii->mii_media_status |= IFM_ACTIVE;
   12151 		sc->sc_tbi_linkup = 1;
   12152 		sc->sc_tbi_serdes_ticks = 0;
   12153 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12154 		if ((reg & PCS_LSTS_FDX) != 0)
   12155 			mii->mii_media_active |= IFM_FDX;
   12156 		else
   12157 			mii->mii_media_active |= IFM_HDX;
   12158 	} else {
   12159 		mii->mii_media_status |= IFM_NONE;
   12160 		sc->sc_tbi_linkup = 0;
   12161 		/* If the timer expired, retry autonegotiation */
   12162 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12163 		    && (++sc->sc_tbi_serdes_ticks
   12164 			>= sc->sc_tbi_serdes_anegticks)) {
   12165 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12166 			sc->sc_tbi_serdes_ticks = 0;
   12167 			/* XXX */
   12168 			wm_serdes_mediachange(ifp);
   12169 		}
   12170 	}
   12171 
   12172 	wm_tbi_serdes_set_linkled(sc);
   12173 }
   12174 
   12175 /* SFP related */
   12176 
   12177 static int
   12178 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12179 {
   12180 	uint32_t i2ccmd;
   12181 	int i;
   12182 
   12183 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12184 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12185 
   12186 	/* Poll the ready bit */
   12187 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12188 		delay(50);
   12189 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12190 		if (i2ccmd & I2CCMD_READY)
   12191 			break;
   12192 	}
   12193 	if ((i2ccmd & I2CCMD_READY) == 0)
   12194 		return -1;
   12195 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12196 		return -1;
   12197 
   12198 	*data = i2ccmd & 0x00ff;
   12199 
   12200 	return 0;
   12201 }
   12202 
   12203 static uint32_t
   12204 wm_sfp_get_media_type(struct wm_softc *sc)
   12205 {
   12206 	uint32_t ctrl_ext;
   12207 	uint8_t val = 0;
   12208 	int timeout = 3;
   12209 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12210 	int rv = -1;
   12211 
   12212 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12213 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12214 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12215 	CSR_WRITE_FLUSH(sc);
   12216 
   12217 	/* Read SFP module data */
   12218 	while (timeout) {
   12219 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12220 		if (rv == 0)
   12221 			break;
   12222 		delay(100*1000); /* XXX too big */
   12223 		timeout--;
   12224 	}
   12225 	if (rv != 0)
   12226 		goto out;
   12227 	switch (val) {
   12228 	case SFF_SFP_ID_SFF:
   12229 		aprint_normal_dev(sc->sc_dev,
   12230 		    "Module/Connector soldered to board\n");
   12231 		break;
   12232 	case SFF_SFP_ID_SFP:
   12233 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12234 		break;
   12235 	case SFF_SFP_ID_UNKNOWN:
   12236 		goto out;
   12237 	default:
   12238 		break;
   12239 	}
   12240 
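         	/* Read the Ethernet compliance codes to pick the media type. */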
   12241 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12242 	if (rv != 0)
   12243 		goto out;
   12245 
   12246 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12247 		mediatype = WM_MEDIATYPE_SERDES;
   12248 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12249 		sc->sc_flags |= WM_F_SGMII;
   12250 		mediatype = WM_MEDIATYPE_COPPER;
   12251 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12252 		sc->sc_flags |= WM_F_SGMII;
   12253 		mediatype = WM_MEDIATYPE_SERDES;
   12254 	}
   12255 
   12256 out:
   12257 	/* Restore I2C interface setting */
   12258 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12259 
   12260 	return mediatype;
   12261 }
   12262 
   12263 /*
   12264  * NVM related.
   12265  * Microwire, SPI (w/wo EERD) and Flash.
   12266  */
   12267 
   12268 /* Both spi and uwire */
   12269 
   12270 /*
   12271  * wm_eeprom_sendbits:
   12272  *
   12273  *	Send a series of bits to the EEPROM.
   12274  */
   12275 static void
   12276 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12277 {
   12278 	uint32_t reg;
   12279 	int x;
   12280 
   12281 	reg = CSR_READ(sc, WMREG_EECD);
   12282 
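         	/* Clock each bit out on DI, MSB first, pulsing SK per bit. */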
   12283 	for (x = nbits; x > 0; x--) {
   12284 		if (bits & (1U << (x - 1)))
   12285 			reg |= EECD_DI;
   12286 		else
   12287 			reg &= ~EECD_DI;
   12288 		CSR_WRITE(sc, WMREG_EECD, reg);
   12289 		CSR_WRITE_FLUSH(sc);
   12290 		delay(2);
   12291 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12292 		CSR_WRITE_FLUSH(sc);
   12293 		delay(2);
   12294 		CSR_WRITE(sc, WMREG_EECD, reg);
   12295 		CSR_WRITE_FLUSH(sc);
   12296 		delay(2);
   12297 	}
   12298 }
   12299 
   12300 /*
   12301  * wm_eeprom_recvbits:
   12302  *
   12303  *	Receive a series of bits from the EEPROM.
   12304  */
   12305 static void
   12306 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12307 {
   12308 	uint32_t reg, val;
   12309 	int x;
   12310 
   12311 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12312 
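         	/* Clock each bit in from DO, MSB first: raise SK, sample, lower SK. */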
   12313 	val = 0;
   12314 	for (x = nbits; x > 0; x--) {
   12315 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12316 		CSR_WRITE_FLUSH(sc);
   12317 		delay(2);
   12318 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12319 			val |= (1U << (x - 1));
   12320 		CSR_WRITE(sc, WMREG_EECD, reg);
   12321 		CSR_WRITE_FLUSH(sc);
   12322 		delay(2);
   12323 	}
   12324 	*valp = val;
   12325 }
   12326 
   12327 /* Microwire */
   12328 
   12329 /*
   12330  * wm_nvm_read_uwire:
   12331  *
   12332  *	Read a word from the EEPROM using the MicroWire protocol.
   12333  */
   12334 static int
   12335 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12336 {
   12337 	uint32_t reg, val;
   12338 	int i;
   12339 
   12340 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12341 		device_xname(sc->sc_dev), __func__));
   12342 
   12343 	if (sc->nvm.acquire(sc) != 0)
   12344 		return -1;
   12345 
   12346 	for (i = 0; i < wordcnt; i++) {
   12347 		/* Clear SK and DI. */
   12348 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12349 		CSR_WRITE(sc, WMREG_EECD, reg);
   12350 
   12351 		/*
   12352 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12353 		 * and Xen.
   12354 		 *
   12355 		 * We use this workaround only for 82540 because qemu's
   12356 		 * e1000 act as 82540.
   12357 		 */
   12358 		if (sc->sc_type == WM_T_82540) {
   12359 			reg |= EECD_SK;
   12360 			CSR_WRITE(sc, WMREG_EECD, reg);
   12361 			reg &= ~EECD_SK;
   12362 			CSR_WRITE(sc, WMREG_EECD, reg);
   12363 			CSR_WRITE_FLUSH(sc);
   12364 			delay(2);
   12365 		}
   12366 		/* XXX: end of workaround */
   12367 
   12368 		/* Set CHIP SELECT. */
   12369 		reg |= EECD_CS;
   12370 		CSR_WRITE(sc, WMREG_EECD, reg);
   12371 		CSR_WRITE_FLUSH(sc);
   12372 		delay(2);
   12373 
   12374 		/* Shift in the READ command. */
   12375 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12376 
   12377 		/* Shift in address. */
   12378 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12379 
   12380 		/* Shift out the data. */
   12381 		wm_eeprom_recvbits(sc, &val, 16);
   12382 		data[i] = val & 0xffff;
   12383 
   12384 		/* Clear CHIP SELECT. */
   12385 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12386 		CSR_WRITE(sc, WMREG_EECD, reg);
   12387 		CSR_WRITE_FLUSH(sc);
   12388 		delay(2);
   12389 	}
   12390 
   12391 	sc->nvm.release(sc);
   12392 	return 0;
   12393 }
   12394 
   12395 /* SPI */
   12396 
   12397 /*
   12398  * Set SPI and FLASH related information from the EECD register.
   12399  * For 82541 and 82547, the word size is taken from EEPROM.
   12400  */
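         /*
          * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6): an EECD
          * size field of 3 on an 82571 yields 1 << (3 + 6) = 512 words.
          */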
   12401 static int
   12402 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12403 {
   12404 	int size;
   12405 	uint32_t reg;
   12406 	uint16_t data;
   12407 
   12408 	reg = CSR_READ(sc, WMREG_EECD);
   12409 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12410 
   12411 	/* Read the size of NVM from EECD by default */
   12412 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12413 	switch (sc->sc_type) {
   12414 	case WM_T_82541:
   12415 	case WM_T_82541_2:
   12416 	case WM_T_82547:
   12417 	case WM_T_82547_2:
   12418 		/* Set dummy value to access EEPROM */
   12419 		sc->sc_nvm_wordsize = 64;
   12420 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12421 			aprint_error_dev(sc->sc_dev,
   12422 			    "%s: failed to read EEPROM size\n", __func__);
   12423 		}
   12424 		reg = data;
   12425 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12426 		if (size == 0)
   12427 			size = 6; /* 64 word size */
   12428 		else
   12429 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12430 		break;
   12431 	case WM_T_80003:
   12432 	case WM_T_82571:
   12433 	case WM_T_82572:
   12434 	case WM_T_82573: /* SPI case */
   12435 	case WM_T_82574: /* SPI case */
   12436 	case WM_T_82583: /* SPI case */
   12437 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12438 		if (size > 14)
   12439 			size = 14;
   12440 		break;
   12441 	case WM_T_82575:
   12442 	case WM_T_82576:
   12443 	case WM_T_82580:
   12444 	case WM_T_I350:
   12445 	case WM_T_I354:
   12446 	case WM_T_I210:
   12447 	case WM_T_I211:
   12448 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12449 		if (size > 15)
   12450 			size = 15;
   12451 		break;
   12452 	default:
   12453 		aprint_error_dev(sc->sc_dev,
   12454 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12455 		return -1;
   12456 		break;
   12457 	}
   12458 
   12459 	sc->sc_nvm_wordsize = 1 << size;
   12460 
   12461 	return 0;
   12462 }
   12463 
   12464 /*
   12465  * wm_nvm_ready_spi:
   12466  *
   12467  *	Wait for a SPI EEPROM to be ready for commands.
   12468  */
   12469 static int
   12470 wm_nvm_ready_spi(struct wm_softc *sc)
   12471 {
   12472 	uint32_t val;
   12473 	int usec;
   12474 
   12475 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12476 		device_xname(sc->sc_dev), __func__));
   12477 
   12478 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12479 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12480 		wm_eeprom_recvbits(sc, &val, 8);
   12481 		if ((val & SPI_SR_RDY) == 0)
   12482 			break;
   12483 	}
   12484 	if (usec >= SPI_MAX_RETRIES) {
   12485 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   12486 		return -1;
   12487 	}
   12488 	return 0;
   12489 }
   12490 
   12491 /*
   12492  * wm_nvm_read_spi:
   12493  *
   12494  *	Read a word from the EEPROM using the SPI protocol.
   12495  */
   12496 static int
   12497 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12498 {
   12499 	uint32_t reg, val;
   12500 	int i;
   12501 	uint8_t opc;
   12502 	int rv = 0;
   12503 
   12504 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12505 		device_xname(sc->sc_dev), __func__));
   12506 
   12507 	if (sc->nvm.acquire(sc) != 0)
   12508 		return -1;
   12509 
   12510 	/* Clear SK and CS. */
   12511 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12512 	CSR_WRITE(sc, WMREG_EECD, reg);
   12513 	CSR_WRITE_FLUSH(sc);
   12514 	delay(2);
   12515 
   12516 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12517 		goto out;
   12518 
   12519 	/* Toggle CS to flush commands. */
   12520 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12521 	CSR_WRITE_FLUSH(sc);
   12522 	delay(2);
   12523 	CSR_WRITE(sc, WMREG_EECD, reg);
   12524 	CSR_WRITE_FLUSH(sc);
   12525 	delay(2);
   12526 
   12527 	opc = SPI_OPC_READ;
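         	/* For 8-bit address parts, address bit 8 rides in the opcode. */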
   12528 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12529 		opc |= SPI_OPC_A8;
   12530 
   12531 	wm_eeprom_sendbits(sc, opc, 8);
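         	/* The EEPROM is byte addressed; send the word address doubled. */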
   12532 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12533 
   12534 	for (i = 0; i < wordcnt; i++) {
   12535 		wm_eeprom_recvbits(sc, &val, 16);
   12536 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12537 	}
   12538 
   12539 	/* Raise CS and clear SK. */
   12540 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12541 	CSR_WRITE(sc, WMREG_EECD, reg);
   12542 	CSR_WRITE_FLUSH(sc);
   12543 	delay(2);
   12544 
   12545 out:
   12546 	sc->nvm.release(sc);
   12547 	return rv;
   12548 }
   12549 
   12550 /* Access via EERD */
   12551 
   12552 static int
   12553 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12554 {
   12555 	uint32_t attempts = 100000;
   12556 	uint32_t i, reg = 0;
   12557 	int32_t done = -1;
   12558 
   12559 	for (i = 0; i < attempts; i++) {
   12560 		reg = CSR_READ(sc, rw);
   12561 
   12562 		if (reg & EERD_DONE) {
   12563 			done = 0;
   12564 			break;
   12565 		}
   12566 		delay(5);
   12567 	}
   12568 
   12569 	return done;
   12570 }
   12571 
   12572 static int
   12573 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12574 {
   12575 	int i, eerd = 0;
   12576 	int rv = 0;
   12577 
   12578 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12579 		device_xname(sc->sc_dev), __func__));
   12580 
   12581 	if (sc->nvm.acquire(sc) != 0)
   12582 		return -1;
   12583 
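         	/*
         	 * For each word, start a read by writing the address with
         	 * EERD_START set, poll for EERD_DONE, then extract the data
         	 * from the upper bits of the EERD register.
         	 */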
   12584 	for (i = 0; i < wordcnt; i++) {
   12585 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12586 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12587 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12588 		if (rv != 0) {
   12589 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   12590 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12591 			break;
   12592 		}
   12593 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12594 	}
   12595 
   12596 	sc->nvm.release(sc);
   12597 	return rv;
   12598 }
   12599 
   12600 /* Flash */
   12601 
   12602 static int
   12603 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12604 {
   12605 	uint32_t eecd;
   12606 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12607 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12608 	uint32_t nvm_dword = 0;
   12609 	uint8_t sig_byte = 0;
   12610 	int rv;
   12611 
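         	/*
         	 * The flash holds two NVM banks, each with a signature byte;
         	 * the bank whose signature matches ICH_NVM_SIG_VALUE is the
         	 * valid one.
         	 */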
   12612 	switch (sc->sc_type) {
   12613 	case WM_T_PCH_SPT:
   12614 	case WM_T_PCH_CNP:
   12615 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12616 		act_offset = ICH_NVM_SIG_WORD * 2;
   12617 
   12618 		/* set bank to 0 in case flash read fails. */
   12619 		*bank = 0;
   12620 
   12621 		/* Check bank 0 */
   12622 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12623 		if (rv != 0)
   12624 			return rv;
   12625 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12626 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12627 			*bank = 0;
   12628 			return 0;
   12629 		}
   12630 
   12631 		/* Check bank 1 */
   12632 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   12633 		    &nvm_dword);
         		if (rv != 0)
         			return rv;
   12634 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12635 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12636 			*bank = 1;
   12637 			return 0;
   12638 		}
   12639 		aprint_error_dev(sc->sc_dev,
   12640 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12641 		return -1;
   12642 	case WM_T_ICH8:
   12643 	case WM_T_ICH9:
   12644 		eecd = CSR_READ(sc, WMREG_EECD);
   12645 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12646 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12647 			return 0;
   12648 		}
   12649 		/* FALLTHROUGH */
   12650 	default:
   12651 		/* Default to 0 */
   12652 		*bank = 0;
   12653 
   12654 		/* Check bank 0 */
   12655 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12656 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12657 			*bank = 0;
   12658 			return 0;
   12659 		}
   12660 
   12661 		/* Check bank 1 */
   12662 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12663 		    &sig_byte);
   12664 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12665 			*bank = 1;
   12666 			return 0;
   12667 		}
   12668 	}
   12669 
   12670 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12671 		device_xname(sc->sc_dev)));
   12672 	return -1;
   12673 }
   12674 
   12675 /******************************************************************************
   12676  * This function does initial flash setup so that a new read/write/erase cycle
   12677  * can be started.
   12678  *
   12679  * sc - The pointer to the hw structure
   12680  ****************************************************************************/
   12681 static int32_t
   12682 wm_ich8_cycle_init(struct wm_softc *sc)
   12683 {
   12684 	uint16_t hsfsts;
   12685 	int32_t error = 1;
   12686 	int32_t i     = 0;
   12687 
   12688 	if (sc->sc_type >= WM_T_PCH_SPT)
   12689 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12690 	else
   12691 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12692 
   12693 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   12694 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12695 		return error;
   12696 
   12697 	/* Clear FCERR in Hw status by writing 1 */
   12698 	/* Clear DAEL in Hw status by writing a 1 */
   12699 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12700 
   12701 	if (sc->sc_type >= WM_T_PCH_SPT)
   12702 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12703 	else
   12704 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12705 
   12706 	/*
   12707 	 * Ideally the hardware would provide either a cycle-in-progress
   12708 	 * bit to check before starting a new cycle, or an FDONE bit that
   12709 	 * reads as 1 after hardware reset, which could then indicate
   12710 	 * whether a cycle is in progress or has completed.  We should
   12711 	 * also have a software semaphore guarding FDONE or the
   12712 	 * cycle-in-progress bit, so that accesses from two threads are
   12713 	 * serialized and two threads don't start a cycle at the same
   12714 	 * time.
   12715 	 */
   12716 
   12717 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12718 		/*
   12719 		 * There is no cycle running at present, so we can start a
   12720 		 * cycle
   12721 		 */
   12722 
   12723 		/* Begin by setting Flash Cycle Done. */
   12724 		hsfsts |= HSFSTS_DONE;
   12725 		if (sc->sc_type >= WM_T_PCH_SPT)
   12726 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12727 			    hsfsts & 0xffffUL);
   12728 		else
   12729 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12730 		error = 0;
   12731 	} else {
   12732 		/*
   12733 		 * otherwise poll for sometime so the current cycle has a
   12734 		 * chance to end before giving up.
   12735 		 */
   12736 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12737 			if (sc->sc_type >= WM_T_PCH_SPT)
   12738 				hsfsts = ICH8_FLASH_READ32(sc,
   12739 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12740 			else
   12741 				hsfsts = ICH8_FLASH_READ16(sc,
   12742 				    ICH_FLASH_HSFSTS);
   12743 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12744 				error = 0;
   12745 				break;
   12746 			}
   12747 			delay(1);
   12748 		}
   12749 		if (error == 0) {
   12750 			/*
   12751 			 * The previous cycle finished in time; now set the
   12752 			 * Flash Cycle Done bit.
   12753 			 */
   12754 			hsfsts |= HSFSTS_DONE;
   12755 			if (sc->sc_type >= WM_T_PCH_SPT)
   12756 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12757 				    hsfsts & 0xffffUL);
   12758 			else
   12759 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12760 				    hsfsts);
   12761 		}
   12762 	}
   12763 	return error;
   12764 }
   12765 
   12766 /******************************************************************************
   12767  * This function starts a flash cycle and waits for its completion
   12768  *
   12769  * sc - The pointer to the hw structure
   12770  ****************************************************************************/
   12771 static int32_t
   12772 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12773 {
   12774 	uint16_t hsflctl;
   12775 	uint16_t hsfsts;
   12776 	int32_t error = 1;
   12777 	uint32_t i = 0;
   12778 
   12779 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12780 	if (sc->sc_type >= WM_T_PCH_SPT)
   12781 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12782 	else
   12783 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12784 	hsflctl |= HSFCTL_GO;
   12785 	if (sc->sc_type >= WM_T_PCH_SPT)
   12786 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12787 		    (uint32_t)hsflctl << 16);
   12788 	else
   12789 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12790 
   12791 	/* Wait till FDONE bit is set to 1 */
   12792 	do {
   12793 		if (sc->sc_type >= WM_T_PCH_SPT)
   12794 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12795 			    & 0xffffUL;
   12796 		else
   12797 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12798 		if (hsfsts & HSFSTS_DONE)
   12799 			break;
   12800 		delay(1);
   12801 		i++;
   12802 	} while (i < timeout);
   12803 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   12804 		error = 0;
   12805 
   12806 	return error;
   12807 }
   12808 
   12809 /******************************************************************************
   12810  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12811  *
   12812  * sc - The pointer to the hw structure
   12813  * index - The index of the byte or word to read.
   12814  * size - Size of data to read, 1=byte 2=word, 4=dword
   12815  * data - Pointer to the word to store the value read.
   12816  *****************************************************************************/
   12817 static int32_t
   12818 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12819     uint32_t size, uint32_t *data)
   12820 {
   12821 	uint16_t hsfsts;
   12822 	uint16_t hsflctl;
   12823 	uint32_t flash_linear_address;
   12824 	uint32_t flash_data = 0;
   12825 	int32_t error = 1;
   12826 	int32_t count = 0;
   12827 
   12828 	if (size < 1 || size > 4 || data == NULL ||
   12829 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12830 		return error;
   12831 
   12832 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12833 	    sc->sc_ich8_flash_base;
   12834 
   12835 	do {
   12836 		delay(1);
   12837 		/* Steps */
   12838 		error = wm_ich8_cycle_init(sc);
   12839 		if (error)
   12840 			break;
   12841 
   12842 		if (sc->sc_type >= WM_T_PCH_SPT)
   12843 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12844 			    >> 16;
   12845 		else
   12846 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12847 		/* The byte count field holds size - 1: 0 = 1 byte, 3 = 4 bytes. */
   12848 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12849 		    & HSFCTL_BCOUNT_MASK;
   12850 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12851 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12852 			/*
   12853 			 * In SPT, This register is in Lan memory space, not
   12854 			 * flash. Therefore, only 32 bit access is supported.
   12855 			 */
   12856 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12857 			    (uint32_t)hsflctl << 16);
   12858 		} else
   12859 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12860 
   12861 		/*
   12862 		 * Write the last 24 bits of index into Flash Linear address
   12863 		 * field in Flash Address
   12864 		 */
   12865 		/* TODO: TBD maybe check the index against the size of flash */
   12866 
   12867 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12868 
   12869 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12870 
    12871 		/*
    12872 		 * If FCERR is set, clear it and retry the whole sequence
    12873 		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read
    12874 		 * the data from Flash Data0 (least significant byte
    12875 		 * first).
    12876 		 */
   12877 		if (error == 0) {
   12878 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12879 			if (size == 1)
   12880 				*data = (uint8_t)(flash_data & 0x000000FF);
   12881 			else if (size == 2)
   12882 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12883 			else if (size == 4)
   12884 				*data = (uint32_t)flash_data;
   12885 			break;
   12886 		} else {
    12887 			/*
    12888 			 * If we've gotten here, things are probably
    12889 			 * completely hosed, but if the error condition is
    12890 			 * detected it won't hurt to retry - up to
    12891 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times in total.
    12892 			 */
   12893 			if (sc->sc_type >= WM_T_PCH_SPT)
   12894 				hsfsts = ICH8_FLASH_READ32(sc,
   12895 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12896 			else
   12897 				hsfsts = ICH8_FLASH_READ16(sc,
   12898 				    ICH_FLASH_HSFSTS);
   12899 
   12900 			if (hsfsts & HSFSTS_ERR) {
   12901 				/* Repeat for some time before giving up. */
   12902 				continue;
   12903 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12904 				break;
   12905 		}
   12906 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12907 
   12908 	return error;
   12909 }
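
/*
 * Worked example for wm_read_ich8_data() (the register value is made
 * up): a read programs BCOUNT = size - 1 plus ICH_CYCLE_READ and then
 * masks Flash Data0 down to the requested width:
 *
 *	FDATA0 = 0xdeadbeef, size = 1  ->  *data = 0xef
 *	FDATA0 = 0xdeadbeef, size = 2  ->  *data = 0xbeef
 *	FDATA0 = 0xdeadbeef, size = 4  ->  *data = 0xdeadbeef
 */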
   12910 
   12911 /******************************************************************************
   12912  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12913  *
    12914  * sc - pointer to the wm_softc structure
   12915  * index - The index of the byte to read.
   12916  * data - Pointer to a byte to store the value read.
   12917  *****************************************************************************/
   12918 static int32_t
    12919 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
   12920 {
   12921 	int32_t status;
   12922 	uint32_t word = 0;
   12923 
   12924 	status = wm_read_ich8_data(sc, index, 1, &word);
   12925 	if (status == 0)
   12926 		*data = (uint8_t)word;
   12927 	else
   12928 		*data = 0;
   12929 
   12930 	return status;
   12931 }
   12932 
   12933 /******************************************************************************
   12934  * Reads a word from the NVM using the ICH8 flash access registers.
   12935  *
    12936  * sc - pointer to the wm_softc structure
   12937  * index - The starting byte index of the word to read.
   12938  * data - Pointer to a word to store the value read.
   12939  *****************************************************************************/
   12940 static int32_t
   12941 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12942 {
   12943 	int32_t status;
   12944 	uint32_t word = 0;
   12945 
   12946 	status = wm_read_ich8_data(sc, index, 2, &word);
   12947 	if (status == 0)
   12948 		*data = (uint16_t)word;
   12949 	else
   12950 		*data = 0;
   12951 
   12952 	return status;
   12953 }
   12954 
   12955 /******************************************************************************
   12956  * Reads a dword from the NVM using the ICH8 flash access registers.
   12957  *
    12958  * sc - pointer to the wm_softc structure
    12959  * index - The starting byte index of the dword to read.
    12960  * data - Pointer to a dword to store the value read.
   12961  *****************************************************************************/
   12962 static int32_t
   12963 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12964 {
   12965 	int32_t status;
   12966 
   12967 	status = wm_read_ich8_data(sc, index, 4, data);
   12968 	return status;
   12969 }
   12970 
   12971 /******************************************************************************
   12972  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12973  * register.
   12974  *
    12975  * sc - Struct containing variables accessed by shared code
    12976  * offset - offset of word in the EEPROM to read
    12977  * words - number of words to read
    12978  * data - words read from the EEPROM
   12979  *****************************************************************************/
   12980 static int
   12981 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12982 {
   12983 	int32_t	 rv = 0;
   12984 	uint32_t flash_bank = 0;
   12985 	uint32_t act_offset = 0;
   12986 	uint32_t bank_offset = 0;
   12987 	uint16_t word = 0;
   12988 	uint16_t i = 0;
   12989 
   12990 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12991 		device_xname(sc->sc_dev), __func__));
   12992 
   12993 	if (sc->nvm.acquire(sc) != 0)
   12994 		return -1;
   12995 
    12996 	/*
    12997 	 * We need to know which flash bank is valid.  If we didn't
    12998 	 * allocate eeprom_shadow_ram, we aren't tracking flash_bank,
    12999 	 * so it cannot be trusted and needs to be re-detected on
    13000 	 * each read.
    13001 	 */
   13002 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13003 	if (rv) {
   13004 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13005 			device_xname(sc->sc_dev)));
   13006 		flash_bank = 0;
   13007 	}
   13008 
   13009 	/*
   13010 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13011 	 * size
   13012 	 */
   13013 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13014 
   13015 	for (i = 0; i < words; i++) {
   13016 		/* The NVM part needs a byte offset, hence * 2 */
   13017 		act_offset = bank_offset + ((offset + i) * 2);
   13018 		rv = wm_read_ich8_word(sc, act_offset, &word);
   13019 		if (rv) {
   13020 			aprint_error_dev(sc->sc_dev,
   13021 			    "%s: failed to read NVM\n", __func__);
   13022 			break;
   13023 		}
   13024 		data[i] = word;
   13025 	}
   13026 
   13027 	sc->nvm.release(sc);
   13028 	return rv;
   13029 }
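
/*
 * Worked example of the bank arithmetic above (the bank size is made
 * up for illustration):
 *
 *	sc_ich8_flash_bank_size = 0x800 words
 *	flash_bank = 1  ->  bank_offset = 1 * (0x800 * 2) = 0x1000 bytes
 *	word 0x0007     ->  act_offset  = 0x1000 + (7 * 2) = 0x100e
 */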
   13030 
   13031 /******************************************************************************
   13032  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   13033  * register.
   13034  *
    13035  * sc - Struct containing variables accessed by shared code
    13036  * offset - offset of word in the EEPROM to read
    13037  * words - number of words to read
    13038  * data - words read from the EEPROM
   13039  *****************************************************************************/
   13040 static int
   13041 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13042 {
   13043 	int32_t	 rv = 0;
   13044 	uint32_t flash_bank = 0;
   13045 	uint32_t act_offset = 0;
   13046 	uint32_t bank_offset = 0;
   13047 	uint32_t dword = 0;
   13048 	uint16_t i = 0;
   13049 
   13050 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13051 		device_xname(sc->sc_dev), __func__));
   13052 
   13053 	if (sc->nvm.acquire(sc) != 0)
   13054 		return -1;
   13055 
    13056 	/*
    13057 	 * We need to know which flash bank is valid.  If we didn't
    13058 	 * allocate eeprom_shadow_ram, we aren't tracking flash_bank,
    13059 	 * so it cannot be trusted and needs to be re-detected on
    13060 	 * each read.
    13061 	 */
   13062 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13063 	if (rv) {
   13064 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13065 			device_xname(sc->sc_dev)));
   13066 		flash_bank = 0;
   13067 	}
   13068 
   13069 	/*
   13070 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   13071 	 * size
   13072 	 */
   13073 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13074 
   13075 	for (i = 0; i < words; i++) {
   13076 		/* The NVM part needs a byte offset, hence * 2 */
   13077 		act_offset = bank_offset + ((offset + i) * 2);
   13078 		/* but we must read dword aligned, so mask ... */
   13079 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13080 		if (rv) {
   13081 			aprint_error_dev(sc->sc_dev,
   13082 			    "%s: failed to read NVM\n", __func__);
   13083 			break;
   13084 		}
   13085 		/* ... and pick out low or high word */
   13086 		if ((act_offset & 0x2) == 0)
   13087 			data[i] = (uint16_t)(dword & 0xFFFF);
   13088 		else
   13089 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13090 	}
   13091 
   13092 	sc->nvm.release(sc);
   13093 	return rv;
   13094 }
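
/*
 * Worked example of the dword-aligned access above: every second word
 * lands in the upper half of the dword that is actually fetched:
 *
 *	act_offset = 0x1004 -> read dword at 0x1004, data = dword & 0xffff
 *	act_offset = 0x1006 -> read dword at 0x1004, data = dword >> 16
 */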
   13095 
   13096 /* iNVM */
   13097 
   13098 static int
   13099 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13100 {
    13101 	int32_t	 rv = -1;	/* Error unless the word is found */
   13102 	uint32_t invm_dword;
   13103 	uint16_t i;
   13104 	uint8_t record_type, word_address;
   13105 
   13106 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13107 		device_xname(sc->sc_dev), __func__));
   13108 
   13109 	for (i = 0; i < INVM_SIZE; i++) {
   13110 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13111 		/* Get record type */
   13112 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13113 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13114 			break;
   13115 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13116 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13117 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13118 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13119 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13120 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13121 			if (word_address == address) {
   13122 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13123 				rv = 0;
   13124 				break;
   13125 			}
   13126 		}
   13127 	}
   13128 
   13129 	return rv;
   13130 }
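
/*
 * Illustrative iNVM record stream for the scan above (the contents
 * are made up; the skip distances come from the
 * INVM_*_DATA_SIZE_IN_DWORDS constants):
 *
 *	dword[0]  WORD_AUTOLOAD   word_address 0x00, 16-bit data word
 *	dword[1]  WORD_AUTOLOAD   word_address 0x01, 16-bit data word
 *	dword[2]  CSR_AUTOLOAD    skip its payload dwords
 *	...
 *	dword[n]  UNINITIALIZED   end of used records - stop scanning
 */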
   13131 
   13132 static int
   13133 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13134 {
   13135 	int rv = 0;
   13136 	int i;
   13137 
   13138 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13139 		device_xname(sc->sc_dev), __func__));
   13140 
   13141 	if (sc->nvm.acquire(sc) != 0)
   13142 		return -1;
   13143 
   13144 	for (i = 0; i < words; i++) {
   13145 		switch (offset + i) {
   13146 		case NVM_OFF_MACADDR:
   13147 		case NVM_OFF_MACADDR1:
   13148 		case NVM_OFF_MACADDR2:
   13149 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13150 			if (rv != 0) {
   13151 				data[i] = 0xffff;
   13152 				rv = -1;
   13153 			}
   13154 			break;
   13155 		case NVM_OFF_CFG2:
   13156 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13157 			if (rv != 0) {
   13158 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13159 				rv = 0;
   13160 			}
   13161 			break;
   13162 		case NVM_OFF_CFG4:
   13163 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13164 			if (rv != 0) {
   13165 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13166 				rv = 0;
   13167 			}
   13168 			break;
   13169 		case NVM_OFF_LED_1_CFG:
   13170 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13171 			if (rv != 0) {
   13172 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13173 				rv = 0;
   13174 			}
   13175 			break;
   13176 		case NVM_OFF_LED_0_2_CFG:
   13177 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13178 			if (rv != 0) {
   13179 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13180 				rv = 0;
   13181 			}
   13182 			break;
   13183 		case NVM_OFF_ID_LED_SETTINGS:
   13184 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13185 			if (rv != 0) {
   13186 				*data = ID_LED_RESERVED_FFFF;
   13187 				rv = 0;
   13188 			}
   13189 			break;
   13190 		default:
   13191 			DPRINTF(WM_DEBUG_NVM,
    13192 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
   13193 			*data = NVM_RESERVED_WORD;
   13194 			break;
   13195 		}
   13196 	}
   13197 
   13198 	sc->nvm.release(sc);
   13199 	return rv;
   13200 }
   13201 
    13202 /* Locking, NVM type detection, checksum validation, version check and read */
   13203 
   13204 static int
   13205 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13206 {
   13207 	uint32_t eecd = 0;
   13208 
   13209 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13210 	    || sc->sc_type == WM_T_82583) {
   13211 		eecd = CSR_READ(sc, WMREG_EECD);
   13212 
   13213 		/* Isolate bits 15 & 16 */
   13214 		eecd = ((eecd >> 15) & 0x03);
   13215 
   13216 		/* If both bits are set, device is Flash type */
   13217 		if (eecd == 0x03)
   13218 			return 0;
   13219 	}
   13220 	return 1;
   13221 }
   13222 
   13223 static int
   13224 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13225 {
   13226 	uint32_t eec;
   13227 
   13228 	eec = CSR_READ(sc, WMREG_EEC);
   13229 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13230 		return 1;
   13231 
   13232 	return 0;
   13233 }
   13234 
   13235 /*
   13236  * wm_nvm_validate_checksum
   13237  *
   13238  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13239  */
   13240 static int
   13241 wm_nvm_validate_checksum(struct wm_softc *sc)
   13242 {
   13243 	uint16_t checksum;
   13244 	uint16_t eeprom_data;
   13245 #ifdef WM_DEBUG
   13246 	uint16_t csum_wordaddr, valid_checksum;
   13247 #endif
   13248 	int i;
   13249 
   13250 	checksum = 0;
   13251 
   13252 	/* Don't check for I211 */
   13253 	if (sc->sc_type == WM_T_I211)
   13254 		return 0;
   13255 
   13256 #ifdef WM_DEBUG
   13257 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13258 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13259 		csum_wordaddr = NVM_OFF_COMPAT;
   13260 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13261 	} else {
   13262 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13263 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13264 	}
   13265 
   13266 	/* Dump EEPROM image for debug */
   13267 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13268 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13269 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13270 		/* XXX PCH_SPT? */
   13271 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13272 		if ((eeprom_data & valid_checksum) == 0)
   13273 			DPRINTF(WM_DEBUG_NVM,
   13274 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   13275 				device_xname(sc->sc_dev), eeprom_data,
   13276 				    valid_checksum));
   13277 	}
   13278 
   13279 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13280 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13281 		for (i = 0; i < NVM_SIZE; i++) {
   13282 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13283 				printf("XXXX ");
   13284 			else
   13285 				printf("%04hx ", eeprom_data);
   13286 			if (i % 8 == 7)
   13287 				printf("\n");
   13288 		}
   13289 	}
   13290 
   13291 #endif /* WM_DEBUG */
   13292 
   13293 	for (i = 0; i < NVM_SIZE; i++) {
   13294 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13295 			return 1;
   13296 		checksum += eeprom_data;
   13297 	}
   13298 
   13299 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13300 #ifdef WM_DEBUG
   13301 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13302 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13303 #endif
   13304 	}
   13305 
   13306 	return 0;
   13307 }
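
/*
 * The checksum rule implemented above, in short: the 16-bit sum (mod
 * 0x10000) of NVM words 0x00-0x3f must equal NVM_CHECKSUM (0xbaba on
 * these parts).  The checksum word itself is normally written so that
 * the sum comes out right:
 *
 *	checksum_word = NVM_CHECKSUM - (sum of words 0x00-0x3e)
 */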
   13308 
   13309 static void
   13310 wm_nvm_version_invm(struct wm_softc *sc)
   13311 {
   13312 	uint32_t dword;
   13313 
   13314 	/*
    13315 	 * Linux's code to decode the version is very strange, so we
    13316 	 * don't follow that algorithm; we just use word 61 as the
    13317 	 * documentation describes. It may not be perfect, though...
   13318 	 *
   13319 	 * Example:
   13320 	 *
   13321 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13322 	 */
   13323 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13324 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13325 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13326 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13327 }
   13328 
   13329 static void
   13330 wm_nvm_version(struct wm_softc *sc)
   13331 {
   13332 	uint16_t major, minor, build, patch;
   13333 	uint16_t uid0, uid1;
   13334 	uint16_t nvm_data;
   13335 	uint16_t off;
   13336 	bool check_version = false;
   13337 	bool check_optionrom = false;
   13338 	bool have_build = false;
   13339 	bool have_uid = true;
   13340 
   13341 	/*
   13342 	 * Version format:
   13343 	 *
   13344 	 * XYYZ
   13345 	 * X0YZ
   13346 	 * X0YY
   13347 	 *
   13348 	 * Example:
   13349 	 *
   13350 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13351 	 *	82571	0x50a6	5.10.6?
   13352 	 *	82572	0x506a	5.6.10?
   13353 	 *	82572EI	0x5069	5.6.9?
   13354 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13355 	 *		0x2013	2.1.3?
    13356 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   13357 	 */
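
	/*
	 * Worked decode of the first entry above, using the
	 * NVM_MAJOR/NVM_MINOR/NVM_BUILD masks applied below:
	 *
	 *	0x50a2: major = 0x5, minor = 0x0a (BCD -> 10), build = 0x2
	 *		=> version 5.10.2
	 */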
   13358 
   13359 	/*
   13360 	 * XXX
    13361 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    13362 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13363 	 */
   13364 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13365 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13366 		have_uid = false;
   13367 
   13368 	switch (sc->sc_type) {
   13369 	case WM_T_82571:
   13370 	case WM_T_82572:
   13371 	case WM_T_82574:
   13372 	case WM_T_82583:
   13373 		check_version = true;
   13374 		check_optionrom = true;
   13375 		have_build = true;
   13376 		break;
   13377 	case WM_T_82575:
   13378 	case WM_T_82576:
   13379 	case WM_T_82580:
   13380 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13381 			check_version = true;
   13382 		break;
   13383 	case WM_T_I211:
   13384 		wm_nvm_version_invm(sc);
   13385 		have_uid = false;
   13386 		goto printver;
   13387 	case WM_T_I210:
   13388 		if (!wm_nvm_flash_presence_i210(sc)) {
   13389 			wm_nvm_version_invm(sc);
   13390 			have_uid = false;
   13391 			goto printver;
   13392 		}
   13393 		/* FALLTHROUGH */
   13394 	case WM_T_I350:
   13395 	case WM_T_I354:
   13396 		check_version = true;
   13397 		check_optionrom = true;
   13398 		break;
   13399 	default:
   13400 		return;
   13401 	}
   13402 	if (check_version
   13403 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13404 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13405 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13406 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13407 			build = nvm_data & NVM_BUILD_MASK;
   13408 			have_build = true;
   13409 		} else
   13410 			minor = nvm_data & 0x00ff;
   13411 
    13412 		/* Convert the BCD-encoded minor to decimal */
   13413 		minor = (minor / 16) * 10 + (minor % 16);
   13414 		sc->sc_nvm_ver_major = major;
   13415 		sc->sc_nvm_ver_minor = minor;
   13416 
   13417 printver:
   13418 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13419 		    sc->sc_nvm_ver_minor);
   13420 		if (have_build) {
   13421 			sc->sc_nvm_ver_build = build;
   13422 			aprint_verbose(".%d", build);
   13423 		}
   13424 	}
   13425 
    13426 	/* Assume the Option ROM area is above NVM_SIZE */
   13427 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13428 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13429 		/* Option ROM Version */
   13430 		if ((off != 0x0000) && (off != 0xffff)) {
   13431 			int rv;
   13432 
   13433 			off += NVM_COMBO_VER_OFF;
   13434 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13435 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13436 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13437 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13438 				/* 16bits */
   13439 				major = uid0 >> 8;
   13440 				build = (uid0 << 8) | (uid1 >> 8);
   13441 				patch = uid1 & 0x00ff;
   13442 				aprint_verbose(", option ROM Version %d.%d.%d",
   13443 				    major, build, patch);
   13444 			}
   13445 		}
   13446 	}
   13447 
   13448 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13449 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13450 }
   13451 
   13452 /*
   13453  * wm_nvm_read:
   13454  *
   13455  *	Read data from the serial EEPROM.
   13456  */
   13457 static int
   13458 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13459 {
   13460 	int rv;
   13461 
   13462 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13463 		device_xname(sc->sc_dev), __func__));
   13464 
   13465 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13466 		return -1;
   13467 
   13468 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13469 
   13470 	return rv;
   13471 }
   13472 
   13473 /*
   13474  * Hardware semaphores.
    13475  * Very complex...
   13476  */
   13477 
   13478 static int
   13479 wm_get_null(struct wm_softc *sc)
   13480 {
   13481 
   13482 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13483 		device_xname(sc->sc_dev), __func__));
   13484 	return 0;
   13485 }
   13486 
   13487 static void
   13488 wm_put_null(struct wm_softc *sc)
   13489 {
   13490 
   13491 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13492 		device_xname(sc->sc_dev), __func__));
   13493 	return;
   13494 }
   13495 
   13496 static int
   13497 wm_get_eecd(struct wm_softc *sc)
   13498 {
   13499 	uint32_t reg;
   13500 	int x;
   13501 
   13502 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13503 		device_xname(sc->sc_dev), __func__));
   13504 
   13505 	reg = CSR_READ(sc, WMREG_EECD);
   13506 
   13507 	/* Request EEPROM access. */
   13508 	reg |= EECD_EE_REQ;
   13509 	CSR_WRITE(sc, WMREG_EECD, reg);
   13510 
    13511 	/* ... and wait for it to be granted. */
   13512 	for (x = 0; x < 1000; x++) {
   13513 		reg = CSR_READ(sc, WMREG_EECD);
   13514 		if (reg & EECD_EE_GNT)
   13515 			break;
   13516 		delay(5);
   13517 	}
   13518 	if ((reg & EECD_EE_GNT) == 0) {
   13519 		aprint_error_dev(sc->sc_dev,
   13520 		    "could not acquire EEPROM GNT\n");
   13521 		reg &= ~EECD_EE_REQ;
   13522 		CSR_WRITE(sc, WMREG_EECD, reg);
   13523 		return -1;
   13524 	}
   13525 
   13526 	return 0;
   13527 }
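
/*
 * Illustrative EECD request/grant handshake, matching wm_get_eecd()
 * above and wm_put_eecd() below:
 *
 *	EECD |= EECD_EE_REQ;	software requests the EEPROM
 *	wait for EECD_EE_GNT;	hardware grants access
 *	... bit-bang via EECD_SK/EECD_DI/EECD_CS ...
 *	EECD &= ~EECD_EE_REQ;	hand the EEPROM back
 */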
   13528 
   13529 static void
   13530 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13531 {
   13532 
   13533 	*eecd |= EECD_SK;
   13534 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13535 	CSR_WRITE_FLUSH(sc);
   13536 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13537 		delay(1);
   13538 	else
   13539 		delay(50);
   13540 }
   13541 
   13542 static void
   13543 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13544 {
   13545 
   13546 	*eecd &= ~EECD_SK;
   13547 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13548 	CSR_WRITE_FLUSH(sc);
   13549 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13550 		delay(1);
   13551 	else
   13552 		delay(50);
   13553 }
   13554 
   13555 static void
   13556 wm_put_eecd(struct wm_softc *sc)
   13557 {
   13558 	uint32_t reg;
   13559 
   13560 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13561 		device_xname(sc->sc_dev), __func__));
   13562 
   13563 	/* Stop nvm */
   13564 	reg = CSR_READ(sc, WMREG_EECD);
   13565 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13566 		/* Pull CS high */
   13567 		reg |= EECD_CS;
   13568 		wm_nvm_eec_clock_lower(sc, &reg);
   13569 	} else {
   13570 		/* CS on Microwire is active-high */
   13571 		reg &= ~(EECD_CS | EECD_DI);
   13572 		CSR_WRITE(sc, WMREG_EECD, reg);
   13573 		wm_nvm_eec_clock_raise(sc, &reg);
   13574 		wm_nvm_eec_clock_lower(sc, &reg);
   13575 	}
   13576 
   13577 	reg = CSR_READ(sc, WMREG_EECD);
   13578 	reg &= ~EECD_EE_REQ;
   13579 	CSR_WRITE(sc, WMREG_EECD, reg);
   13580 
   13581 	return;
   13582 }
   13583 
   13584 /*
   13585  * Get hardware semaphore.
   13586  * Same as e1000_get_hw_semaphore_generic()
   13587  */
   13588 static int
   13589 wm_get_swsm_semaphore(struct wm_softc *sc)
   13590 {
   13591 	int32_t timeout;
   13592 	uint32_t swsm;
   13593 
   13594 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13595 		device_xname(sc->sc_dev), __func__));
   13596 	KASSERT(sc->sc_nvm_wordsize > 0);
   13597 
   13598 retry:
   13599 	/* Get the SW semaphore. */
   13600 	timeout = sc->sc_nvm_wordsize + 1;
   13601 	while (timeout) {
   13602 		swsm = CSR_READ(sc, WMREG_SWSM);
   13603 
   13604 		if ((swsm & SWSM_SMBI) == 0)
   13605 			break;
   13606 
   13607 		delay(50);
   13608 		timeout--;
   13609 	}
   13610 
   13611 	if (timeout == 0) {
   13612 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13613 			/*
   13614 			 * In rare circumstances, the SW semaphore may already
   13615 			 * be held unintentionally. Clear the semaphore once
   13616 			 * before giving up.
   13617 			 */
   13618 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13619 			wm_put_swsm_semaphore(sc);
   13620 			goto retry;
   13621 		}
   13622 		aprint_error_dev(sc->sc_dev,
   13623 		    "could not acquire SWSM SMBI\n");
   13624 		return 1;
   13625 	}
   13626 
   13627 	/* Get the FW semaphore. */
   13628 	timeout = sc->sc_nvm_wordsize + 1;
   13629 	while (timeout) {
   13630 		swsm = CSR_READ(sc, WMREG_SWSM);
   13631 		swsm |= SWSM_SWESMBI;
   13632 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13633 		/* If we managed to set the bit we got the semaphore. */
   13634 		swsm = CSR_READ(sc, WMREG_SWSM);
   13635 		if (swsm & SWSM_SWESMBI)
   13636 			break;
   13637 
   13638 		delay(50);
   13639 		timeout--;
   13640 	}
   13641 
   13642 	if (timeout == 0) {
   13643 		aprint_error_dev(sc->sc_dev,
   13644 		    "could not acquire SWSM SWESMBI\n");
   13645 		/* Release semaphores */
   13646 		wm_put_swsm_semaphore(sc);
   13647 		return 1;
   13648 	}
   13649 	return 0;
   13650 }
   13651 
   13652 /*
   13653  * Put hardware semaphore.
   13654  * Same as e1000_put_hw_semaphore_generic()
   13655  */
   13656 static void
   13657 wm_put_swsm_semaphore(struct wm_softc *sc)
   13658 {
   13659 	uint32_t swsm;
   13660 
   13661 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13662 		device_xname(sc->sc_dev), __func__));
   13663 
   13664 	swsm = CSR_READ(sc, WMREG_SWSM);
   13665 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13666 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13667 }
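
/*
 * The SWSM semaphore is taken in two stages, as implemented above:
 * SWSM_SMBI arbitrates among software agents and SWSM_SWESMBI between
 * software and firmware.  Typical usage (a sketch, not driver code):
 *
 *	if (wm_get_swsm_semaphore(sc) == 0) {
 *		... touch the shared NVM/PHY resource ...
 *		wm_put_swsm_semaphore(sc);
 *	}
 */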
   13668 
   13669 /*
   13670  * Get SW/FW semaphore.
   13671  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13672  */
   13673 static int
   13674 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13675 {
   13676 	uint32_t swfw_sync;
   13677 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13678 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13679 	int timeout;
   13680 
   13681 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13682 		device_xname(sc->sc_dev), __func__));
   13683 
   13684 	if (sc->sc_type == WM_T_80003)
   13685 		timeout = 50;
   13686 	else
   13687 		timeout = 200;
   13688 
   13689 	while (timeout) {
   13690 		if (wm_get_swsm_semaphore(sc)) {
   13691 			aprint_error_dev(sc->sc_dev,
   13692 			    "%s: failed to get semaphore\n",
   13693 			    __func__);
   13694 			return 1;
   13695 		}
   13696 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13697 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13698 			swfw_sync |= swmask;
   13699 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13700 			wm_put_swsm_semaphore(sc);
   13701 			return 0;
   13702 		}
   13703 		wm_put_swsm_semaphore(sc);
   13704 		delay(5000);
   13705 		timeout--;
   13706 	}
   13707 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13708 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13709 	return 1;
   13710 }
   13711 
   13712 static void
   13713 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13714 {
   13715 	uint32_t swfw_sync;
   13716 
   13717 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13718 		device_xname(sc->sc_dev), __func__));
   13719 
   13720 	while (wm_get_swsm_semaphore(sc) != 0)
   13721 		continue;
   13722 
   13723 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13724 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13725 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13726 
   13727 	wm_put_swsm_semaphore(sc);
   13728 }
   13729 
   13730 static int
   13731 wm_get_nvm_80003(struct wm_softc *sc)
   13732 {
   13733 	int rv;
   13734 
   13735 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13736 		device_xname(sc->sc_dev), __func__));
   13737 
   13738 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13739 		aprint_error_dev(sc->sc_dev,
   13740 		    "%s: failed to get semaphore(SWFW)\n",
   13741 		    __func__);
   13742 		return rv;
   13743 	}
   13744 
   13745 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13746 	    && (rv = wm_get_eecd(sc)) != 0) {
   13747 		aprint_error_dev(sc->sc_dev,
   13748 		    "%s: failed to get semaphore(EECD)\n",
   13749 		    __func__);
   13750 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13751 		return rv;
   13752 	}
   13753 
   13754 	return 0;
   13755 }
   13756 
   13757 static void
   13758 wm_put_nvm_80003(struct wm_softc *sc)
   13759 {
   13760 
   13761 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13762 		device_xname(sc->sc_dev), __func__));
   13763 
   13764 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13765 		wm_put_eecd(sc);
   13766 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13767 }
   13768 
   13769 static int
   13770 wm_get_nvm_82571(struct wm_softc *sc)
   13771 {
   13772 	int rv;
   13773 
   13774 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13775 		device_xname(sc->sc_dev), __func__));
   13776 
   13777 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13778 		return rv;
   13779 
   13780 	switch (sc->sc_type) {
   13781 	case WM_T_82573:
   13782 		break;
   13783 	default:
   13784 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13785 			rv = wm_get_eecd(sc);
   13786 		break;
   13787 	}
   13788 
   13789 	if (rv != 0) {
   13790 		aprint_error_dev(sc->sc_dev,
   13791 		    "%s: failed to get semaphore\n",
   13792 		    __func__);
   13793 		wm_put_swsm_semaphore(sc);
   13794 	}
   13795 
   13796 	return rv;
   13797 }
   13798 
   13799 static void
   13800 wm_put_nvm_82571(struct wm_softc *sc)
   13801 {
   13802 
   13803 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13804 		device_xname(sc->sc_dev), __func__));
   13805 
   13806 	switch (sc->sc_type) {
   13807 	case WM_T_82573:
   13808 		break;
   13809 	default:
   13810 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13811 			wm_put_eecd(sc);
   13812 		break;
   13813 	}
   13814 
   13815 	wm_put_swsm_semaphore(sc);
   13816 }
   13817 
   13818 static int
   13819 wm_get_phy_82575(struct wm_softc *sc)
   13820 {
   13821 
   13822 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13823 		device_xname(sc->sc_dev), __func__));
   13824 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13825 }
   13826 
   13827 static void
   13828 wm_put_phy_82575(struct wm_softc *sc)
   13829 {
   13830 
   13831 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13832 		device_xname(sc->sc_dev), __func__));
   13833 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13834 }
   13835 
   13836 static int
   13837 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13838 {
   13839 	uint32_t ext_ctrl;
    13840 	int timeout;
   13841 
   13842 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13843 		device_xname(sc->sc_dev), __func__));
   13844 
   13845 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13846 	for (timeout = 0; timeout < 200; timeout++) {
   13847 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13848 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13849 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13850 
   13851 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13852 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13853 			return 0;
   13854 		delay(5000);
   13855 	}
   13856 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13857 	    device_xname(sc->sc_dev), ext_ctrl);
   13858 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13859 	return 1;
   13860 }
   13861 
   13862 static void
   13863 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13864 {
   13865 	uint32_t ext_ctrl;
   13866 
   13867 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13868 		device_xname(sc->sc_dev), __func__));
   13869 
   13870 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13871 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13872 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13873 
   13874 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13875 }
   13876 
   13877 static int
   13878 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13879 {
   13880 	uint32_t ext_ctrl;
   13881 	int timeout;
   13882 
   13883 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13884 		device_xname(sc->sc_dev), __func__));
   13885 	mutex_enter(sc->sc_ich_phymtx);
   13886 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13887 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13888 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13889 			break;
   13890 		delay(1000);
   13891 	}
   13892 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13893 		printf("%s: SW has already locked the resource\n",
   13894 		    device_xname(sc->sc_dev));
   13895 		goto out;
   13896 	}
   13897 
   13898 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13899 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13900 	for (timeout = 0; timeout < 1000; timeout++) {
   13901 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13902 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13903 			break;
   13904 		delay(1000);
   13905 	}
   13906 	if (timeout >= 1000) {
   13907 		printf("%s: failed to acquire semaphore\n",
   13908 		    device_xname(sc->sc_dev));
   13909 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13910 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13911 		goto out;
   13912 	}
   13913 	return 0;
   13914 
   13915 out:
   13916 	mutex_exit(sc->sc_ich_phymtx);
   13917 	return 1;
   13918 }
   13919 
   13920 static void
   13921 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13922 {
   13923 	uint32_t ext_ctrl;
   13924 
   13925 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13926 		device_xname(sc->sc_dev), __func__));
   13927 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13928 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13929 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13930 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13931 	} else {
   13932 		printf("%s: Semaphore unexpectedly released\n",
   13933 		    device_xname(sc->sc_dev));
   13934 	}
   13935 
   13936 	mutex_exit(sc->sc_ich_phymtx);
   13937 }
   13938 
   13939 static int
   13940 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13941 {
   13942 
   13943 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13944 		device_xname(sc->sc_dev), __func__));
   13945 	mutex_enter(sc->sc_ich_nvmmtx);
   13946 
   13947 	return 0;
   13948 }
   13949 
   13950 static void
   13951 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13952 {
   13953 
   13954 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13955 		device_xname(sc->sc_dev), __func__));
   13956 	mutex_exit(sc->sc_ich_nvmmtx);
   13957 }
   13958 
   13959 static int
   13960 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13961 {
   13962 	int i = 0;
   13963 	uint32_t reg;
   13964 
   13965 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13966 		device_xname(sc->sc_dev), __func__));
   13967 
   13968 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13969 	do {
   13970 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13971 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13972 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13973 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13974 			break;
   13975 		delay(2*1000);
   13976 		i++;
   13977 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13978 
   13979 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13980 		wm_put_hw_semaphore_82573(sc);
   13981 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13982 		    device_xname(sc->sc_dev));
   13983 		return -1;
   13984 	}
   13985 
   13986 	return 0;
   13987 }
   13988 
   13989 static void
   13990 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13991 {
   13992 	uint32_t reg;
   13993 
   13994 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13995 		device_xname(sc->sc_dev), __func__));
   13996 
   13997 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13998 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13999 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14000 }
   14001 
   14002 /*
   14003  * Management mode and power management related subroutines.
   14004  * BMC, AMT, suspend/resume and EEE.
   14005  */
   14006 
   14007 #ifdef WM_WOL
   14008 static int
   14009 wm_check_mng_mode(struct wm_softc *sc)
   14010 {
   14011 	int rv;
   14012 
   14013 	switch (sc->sc_type) {
   14014 	case WM_T_ICH8:
   14015 	case WM_T_ICH9:
   14016 	case WM_T_ICH10:
   14017 	case WM_T_PCH:
   14018 	case WM_T_PCH2:
   14019 	case WM_T_PCH_LPT:
   14020 	case WM_T_PCH_SPT:
   14021 	case WM_T_PCH_CNP:
   14022 		rv = wm_check_mng_mode_ich8lan(sc);
   14023 		break;
   14024 	case WM_T_82574:
   14025 	case WM_T_82583:
   14026 		rv = wm_check_mng_mode_82574(sc);
   14027 		break;
   14028 	case WM_T_82571:
   14029 	case WM_T_82572:
   14030 	case WM_T_82573:
   14031 	case WM_T_80003:
   14032 		rv = wm_check_mng_mode_generic(sc);
   14033 		break;
   14034 	default:
    14035 		/* nothing to do */
   14036 		rv = 0;
   14037 		break;
   14038 	}
   14039 
   14040 	return rv;
   14041 }
   14042 
   14043 static int
   14044 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14045 {
   14046 	uint32_t fwsm;
   14047 
   14048 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14049 
   14050 	if (((fwsm & FWSM_FW_VALID) != 0)
   14051 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14052 		return 1;
   14053 
   14054 	return 0;
   14055 }
   14056 
   14057 static int
   14058 wm_check_mng_mode_82574(struct wm_softc *sc)
   14059 {
   14060 	uint16_t data;
   14061 
   14062 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14063 
   14064 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14065 		return 1;
   14066 
   14067 	return 0;
   14068 }
   14069 
   14070 static int
   14071 wm_check_mng_mode_generic(struct wm_softc *sc)
   14072 {
   14073 	uint32_t fwsm;
   14074 
   14075 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14076 
   14077 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14078 		return 1;
   14079 
   14080 	return 0;
   14081 }
   14082 #endif /* WM_WOL */
   14083 
   14084 static int
   14085 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14086 {
   14087 	uint32_t manc, fwsm, factps;
   14088 
   14089 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14090 		return 0;
   14091 
   14092 	manc = CSR_READ(sc, WMREG_MANC);
   14093 
   14094 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14095 		device_xname(sc->sc_dev), manc));
   14096 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14097 		return 0;
   14098 
   14099 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14100 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14101 		factps = CSR_READ(sc, WMREG_FACTPS);
   14102 		if (((factps & FACTPS_MNGCG) == 0)
   14103 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14104 			return 1;
    14105 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14106 		uint16_t data;
   14107 
   14108 		factps = CSR_READ(sc, WMREG_FACTPS);
   14109 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14110 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14111 			device_xname(sc->sc_dev), factps, data));
   14112 		if (((factps & FACTPS_MNGCG) == 0)
   14113 		    && ((data & NVM_CFG2_MNGM_MASK)
   14114 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14115 			return 1;
   14116 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14117 	    && ((manc & MANC_ASF_EN) == 0))
   14118 		return 1;
   14119 
   14120 	return 0;
   14121 }
   14122 
   14123 static bool
   14124 wm_phy_resetisblocked(struct wm_softc *sc)
   14125 {
   14126 	bool blocked = false;
   14127 	uint32_t reg;
   14128 	int i = 0;
   14129 
   14130 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14131 		device_xname(sc->sc_dev), __func__));
   14132 
   14133 	switch (sc->sc_type) {
   14134 	case WM_T_ICH8:
   14135 	case WM_T_ICH9:
   14136 	case WM_T_ICH10:
   14137 	case WM_T_PCH:
   14138 	case WM_T_PCH2:
   14139 	case WM_T_PCH_LPT:
   14140 	case WM_T_PCH_SPT:
   14141 	case WM_T_PCH_CNP:
   14142 		do {
   14143 			reg = CSR_READ(sc, WMREG_FWSM);
   14144 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14145 				blocked = true;
   14146 				delay(10*1000);
   14147 				continue;
   14148 			}
   14149 			blocked = false;
   14150 		} while (blocked && (i++ < 30));
    14151 		return blocked;
   14153 	case WM_T_82571:
   14154 	case WM_T_82572:
   14155 	case WM_T_82573:
   14156 	case WM_T_82574:
   14157 	case WM_T_82583:
   14158 	case WM_T_80003:
   14159 		reg = CSR_READ(sc, WMREG_MANC);
    14160 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
    14161 			return true;
    14162 		else
    14163 			return false;
   14165 	default:
   14166 		/* no problem */
   14167 		break;
   14168 	}
   14169 
   14170 	return false;
   14171 }
   14172 
   14173 static void
   14174 wm_get_hw_control(struct wm_softc *sc)
   14175 {
   14176 	uint32_t reg;
   14177 
   14178 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14179 		device_xname(sc->sc_dev), __func__));
   14180 
   14181 	if (sc->sc_type == WM_T_82573) {
   14182 		reg = CSR_READ(sc, WMREG_SWSM);
   14183 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14184 	} else if (sc->sc_type >= WM_T_82571) {
   14185 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14186 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14187 	}
   14188 }
   14189 
   14190 static void
   14191 wm_release_hw_control(struct wm_softc *sc)
   14192 {
   14193 	uint32_t reg;
   14194 
   14195 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14196 		device_xname(sc->sc_dev), __func__));
   14197 
   14198 	if (sc->sc_type == WM_T_82573) {
   14199 		reg = CSR_READ(sc, WMREG_SWSM);
   14200 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14201 	} else if (sc->sc_type >= WM_T_82571) {
   14202 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14203 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14204 	}
   14205 }
   14206 
   14207 static void
   14208 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14209 {
   14210 	uint32_t reg;
   14211 
   14212 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14213 		device_xname(sc->sc_dev), __func__));
   14214 
   14215 	if (sc->sc_type < WM_T_PCH2)
   14216 		return;
   14217 
   14218 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14219 
   14220 	if (gate)
   14221 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14222 	else
   14223 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14224 
   14225 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14226 }
   14227 
   14228 static int
   14229 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14230 {
   14231 	uint32_t fwsm, reg;
   14232 	int rv = 0;
   14233 
   14234 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14235 		device_xname(sc->sc_dev), __func__));
   14236 
   14237 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14238 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14239 
   14240 	/* Disable ULP */
   14241 	wm_ulp_disable(sc);
   14242 
   14243 	/* Acquire PHY semaphore */
   14244 	rv = sc->phy.acquire(sc);
   14245 	if (rv != 0) {
   14246 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
    14247 			device_xname(sc->sc_dev), __func__));
   14248 		return -1;
   14249 	}
   14250 
   14251 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14252 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14253 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14254 	 */
   14255 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14256 	switch (sc->sc_type) {
   14257 	case WM_T_PCH_LPT:
   14258 	case WM_T_PCH_SPT:
   14259 	case WM_T_PCH_CNP:
   14260 		if (wm_phy_is_accessible_pchlan(sc))
   14261 			break;
   14262 
   14263 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14264 		 * forcing MAC to SMBus mode first.
   14265 		 */
   14266 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14267 		reg |= CTRL_EXT_FORCE_SMBUS;
   14268 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14269 #if 0
   14270 		/* XXX Isn't this required??? */
   14271 		CSR_WRITE_FLUSH(sc);
   14272 #endif
   14273 		/* Wait 50 milliseconds for MAC to finish any retries
   14274 		 * that it might be trying to perform from previous
   14275 		 * attempts to acknowledge any phy read requests.
   14276 		 */
   14277 		delay(50 * 1000);
   14278 		/* FALLTHROUGH */
   14279 	case WM_T_PCH2:
   14280 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14281 			break;
   14282 		/* FALLTHROUGH */
   14283 	case WM_T_PCH:
   14284 		if (sc->sc_type == WM_T_PCH)
   14285 			if ((fwsm & FWSM_FW_VALID) != 0)
   14286 				break;
   14287 
   14288 		if (wm_phy_resetisblocked(sc) == true) {
   14289 			printf("XXX reset is blocked(3)\n");
   14290 			break;
   14291 		}
   14292 
   14293 		/* Toggle LANPHYPC Value bit */
   14294 		wm_toggle_lanphypc_pch_lpt(sc);
   14295 
   14296 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14297 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14298 				break;
   14299 
   14300 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14301 			 * so ensure that the MAC is also out of SMBus mode
   14302 			 */
   14303 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14304 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14305 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14306 
   14307 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14308 				break;
   14309 			rv = -1;
   14310 		}
   14311 		break;
   14312 	default:
   14313 		break;
   14314 	}
   14315 
   14316 	/* Release semaphore */
   14317 	sc->phy.release(sc);
   14318 
   14319 	if (rv == 0) {
   14320 		/* Check to see if able to reset PHY.  Print error if not */
   14321 		if (wm_phy_resetisblocked(sc)) {
   14322 			printf("XXX reset is blocked(4)\n");
   14323 			goto out;
   14324 		}
   14325 
   14326 		/* Reset the PHY before any access to it.  Doing so, ensures
   14327 		 * that the PHY is in a known good state before we read/write
   14328 		 * PHY registers.  The generic reset is sufficient here,
   14329 		 * because we haven't determined the PHY type yet.
   14330 		 */
   14331 		if (wm_reset_phy(sc) != 0)
   14332 			goto out;
   14333 
    14334 		/* On a successful reset, possibly need to wait for the PHY
    14335 		 * to quiesce to an accessible state before returning control
    14336 		 * to the calling function.  If the PHY does not quiesce,
    14337 		 * report it; the reset-blocked state is the condition that
    14338 		 * the PHY is in.
    14339 		 */
   14340 		if (wm_phy_resetisblocked(sc))
   14341 			printf("XXX reset is blocked(4)\n");
   14342 	}
   14343 
   14344 out:
   14345 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14346 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14347 		delay(10*1000);
   14348 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14349 	}
   14350 
    14351 	return rv;
   14352 }
   14353 
   14354 static void
   14355 wm_init_manageability(struct wm_softc *sc)
   14356 {
   14357 
   14358 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14359 		device_xname(sc->sc_dev), __func__));
   14360 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14361 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14362 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14363 
   14364 		/* Disable hardware interception of ARP */
   14365 		manc &= ~MANC_ARP_EN;
   14366 
   14367 		/* Enable receiving management packets to the host */
   14368 		if (sc->sc_type >= WM_T_82571) {
   14369 			manc |= MANC_EN_MNG2HOST;
   14370 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14371 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14372 		}
   14373 
   14374 		CSR_WRITE(sc, WMREG_MANC, manc);
   14375 	}
   14376 }
   14377 
   14378 static void
   14379 wm_release_manageability(struct wm_softc *sc)
   14380 {
   14381 
   14382 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14383 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14384 
   14385 		manc |= MANC_ARP_EN;
   14386 		if (sc->sc_type >= WM_T_82571)
   14387 			manc &= ~MANC_EN_MNG2HOST;
   14388 
   14389 		CSR_WRITE(sc, WMREG_MANC, manc);
   14390 	}
   14391 }
   14392 
   14393 static void
   14394 wm_get_wakeup(struct wm_softc *sc)
   14395 {
   14396 
   14397 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14398 	switch (sc->sc_type) {
   14399 	case WM_T_82573:
   14400 	case WM_T_82583:
   14401 		sc->sc_flags |= WM_F_HAS_AMT;
   14402 		/* FALLTHROUGH */
   14403 	case WM_T_80003:
   14404 	case WM_T_82575:
   14405 	case WM_T_82576:
   14406 	case WM_T_82580:
   14407 	case WM_T_I350:
   14408 	case WM_T_I354:
   14409 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14410 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14411 		/* FALLTHROUGH */
   14412 	case WM_T_82541:
   14413 	case WM_T_82541_2:
   14414 	case WM_T_82547:
   14415 	case WM_T_82547_2:
   14416 	case WM_T_82571:
   14417 	case WM_T_82572:
   14418 	case WM_T_82574:
   14419 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14420 		break;
   14421 	case WM_T_ICH8:
   14422 	case WM_T_ICH9:
   14423 	case WM_T_ICH10:
   14424 	case WM_T_PCH:
   14425 	case WM_T_PCH2:
   14426 	case WM_T_PCH_LPT:
   14427 	case WM_T_PCH_SPT:
   14428 	case WM_T_PCH_CNP:
   14429 		sc->sc_flags |= WM_F_HAS_AMT;
   14430 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14431 		break;
   14432 	default:
   14433 		break;
   14434 	}
   14435 
   14436 	/* 1: HAS_MANAGE */
   14437 	if (wm_enable_mng_pass_thru(sc) != 0)
   14438 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14439 
    14440 	/*
    14441 	 * Note that the WOL flags are set after the EEPROM related
    14442 	 * settings have been reset.
    14443 	 */
   14444 }
   14445 
   14446 /*
   14447  * Unconfigure Ultra Low Power mode.
   14448  * Only for I217 and newer (see below).
   14449  */
   14450 static int
   14451 wm_ulp_disable(struct wm_softc *sc)
   14452 {
   14453 	uint32_t reg;
   14454 	uint16_t phyreg;
   14455 	int i = 0, rv = 0;
   14456 
   14457 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14458 		device_xname(sc->sc_dev), __func__));
    14459 	/* Exclude old devices and ones that don't use ULP */
   14460 	if ((sc->sc_type < WM_T_PCH_LPT)
   14461 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14462 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14463 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14464 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14465 		return 0;
   14466 
   14467 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14468 		/* Request ME un-configure ULP mode in the PHY */
   14469 		reg = CSR_READ(sc, WMREG_H2ME);
   14470 		reg &= ~H2ME_ULP;
   14471 		reg |= H2ME_ENFORCE_SETTINGS;
   14472 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14473 
   14474 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14475 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14476 			if (i++ == 30) {
   14477 				printf("%s timed out\n", __func__);
   14478 				return -1;
   14479 			}
   14480 			delay(10 * 1000);
   14481 		}
   14482 		reg = CSR_READ(sc, WMREG_H2ME);
   14483 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14484 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14485 
   14486 		return 0;
   14487 	}
   14488 
   14489 	/* Acquire semaphore */
   14490 	rv = sc->phy.acquire(sc);
   14491 	if (rv != 0) {
   14492 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
    14493 			device_xname(sc->sc_dev), __func__));
   14494 		return -1;
   14495 	}
   14496 
   14497 	/* Toggle LANPHYPC */
   14498 	wm_toggle_lanphypc_pch_lpt(sc);
   14499 
   14500 	/* Unforce SMBus mode in PHY */
   14501 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14502 	if (rv != 0) {
   14503 		uint32_t reg2;
   14504 
   14505 		printf("%s: Force SMBus first.\n", __func__);
   14506 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14507 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14508 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14509 		delay(50 * 1000);
   14510 
   14511 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14512 		    &phyreg);
   14513 		if (rv != 0)
   14514 			goto release;
   14515 	}
   14516 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14517 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14518 
   14519 	/* Unforce SMBus mode in MAC */
   14520 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14521 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14522 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14523 
   14524 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14525 	if (rv != 0)
   14526 		goto release;
   14527 	phyreg |= HV_PM_CTRL_K1_ENA;
   14528 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14529 
   14530 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14531 		&phyreg);
   14532 	if (rv != 0)
   14533 		goto release;
   14534 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14535 	    | I218_ULP_CONFIG1_STICKY_ULP
   14536 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14537 	    | I218_ULP_CONFIG1_WOL_HOST
   14538 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14539 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14540 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14541 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14542 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14543 	phyreg |= I218_ULP_CONFIG1_START;
   14544 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14545 
   14546 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14547 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14548 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14549 
   14550 release:
   14551 	/* Release semaphore */
   14552 	sc->phy.release(sc);
   14553 	wm_gmii_reset(sc);
   14554 	delay(50 * 1000);
   14555 
   14556 	return rv;
   14557 }
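
/*
 * Summary of the two ULP-exit paths above (a flow sketch only): with
 * valid ME firmware the host merely asks the ME to do the work; bare
 * hosts drive the PHY themselves:
 *
 *	ME path:   set H2ME_ENFORCE_SETTINGS, clear H2ME_ULP,
 *		   poll FWSM_ULP_CFG_DONE for up to 300ms
 *	host path: toggle LANPHYPC, unforce SMBus in PHY and MAC,
 *		   clear the I218_ULP_CONFIG1_* bits, then set _START
 */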
   14558 
   14559 /* WOL in the newer chipset interfaces (pchlan) */
   14560 static int
   14561 wm_enable_phy_wakeup(struct wm_softc *sc)
   14562 {
   14563 	device_t dev = sc->sc_dev;
   14564 	uint32_t mreg, moff;
   14565 	uint16_t wuce, wuc, wufc, preg;
   14566 	int i, rv;
   14567 
   14568 	KASSERT(sc->sc_type >= WM_T_PCH);
   14569 
   14570 	/* Copy MAC RARs to PHY RARs */
   14571 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14572 
   14573 	/* Activate PHY wakeup */
   14574 	rv = sc->phy.acquire(sc);
   14575 	if (rv != 0) {
   14576 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14577 		    __func__);
   14578 		return rv;
   14579 	}
   14580 
   14581 	/*
   14582 	 * Enable access to PHY wakeup registers.
   14583 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14584 	 */
   14585 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14586 	if (rv != 0) {
   14587 		device_printf(dev,
   14588 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14589 		goto release;
   14590 	}
   14591 
   14592 	/* Copy MAC MTA to PHY MTA */
   14593 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14594 		uint16_t lo, hi;
   14595 
   14596 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14597 		lo = (uint16_t)(mreg & 0xffff);
   14598 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14599 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14600 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14601 	}
   14602 
   14603 	/* Configure PHY Rx Control register */
   14604 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14605 	mreg = CSR_READ(sc, WMREG_RCTL);
   14606 	if (mreg & RCTL_UPE)
   14607 		preg |= BM_RCTL_UPE;
   14608 	if (mreg & RCTL_MPE)
   14609 		preg |= BM_RCTL_MPE;
   14610 	preg &= ~(BM_RCTL_MO_MASK);
   14611 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14612 	if (moff != 0)
   14613 		preg |= moff << BM_RCTL_MO_SHIFT;
   14614 	if (mreg & RCTL_BAM)
   14615 		preg |= BM_RCTL_BAM;
   14616 	if (mreg & RCTL_PMCF)
   14617 		preg |= BM_RCTL_PMCF;
   14618 	mreg = CSR_READ(sc, WMREG_CTRL);
   14619 	if (mreg & CTRL_RFCE)
   14620 		preg |= BM_RCTL_RFCE;
   14621 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14622 
   14623 	wuc = WUC_APME | WUC_PME_EN;
   14624 	wufc = WUFC_MAG;
   14625 	/* Enable PHY wakeup in MAC register */
   14626 	CSR_WRITE(sc, WMREG_WUC,
   14627 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14628 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14629 
   14630 	/* Configure and enable PHY wakeup in PHY registers */
   14631 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14632 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14633 
   14634 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14635 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14636 
   14637 release:
   14638 	sc->phy.release(sc);
   14639 
   14640 	return rv;
   14641 }
   14642 
   14643 /* Power down workaround on D3 */
   14644 static void
   14645 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14646 {
   14647 	uint32_t reg;
   14648 	uint16_t phyreg;
   14649 	int i;
   14650 
   14651 	for (i = 0; i < 2; i++) {
   14652 		/* Disable link */
   14653 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14654 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14655 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14656 
   14657 		/*
   14658 		 * Call gig speed drop workaround on Gig disable before
   14659 		 * accessing any PHY registers
   14660 		 */
   14661 		if (sc->sc_type == WM_T_ICH8)
   14662 			wm_gig_downshift_workaround_ich8lan(sc);
   14663 
   14664 		/* Write VR power-down enable */
   14665 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14666 		phyreg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14667 		phyreg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14668 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, phyreg);
   14669 
   14670 		/* Read it back and test */
   14671 		sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL, &phyreg);
   14672 		phyreg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14673 		if ((phyreg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14674 			break;
   14675 
   14676 		/* Issue PHY reset and repeat at most one more time */
   14677 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14678 	}
   14679 }
   14680 
   14681 /*
   14682  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14683  *  @sc: pointer to the HW structure
   14684  *
   14685  *  During S0 to Sx transition, it is possible the link remains at gig
   14686  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14687  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14688  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14689  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14690  *  needs to be written.
   14691  *  Parts that support (and are linked to a partner which supports) EEE in
   14692  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14693  *  than 10Mbps w/o EEE.
   14694  */
   14695 static void
   14696 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14697 {
   14698 	uint32_t phy_ctrl;
   14699 
   14700 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14701 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14702 
   14703 	if (sc->sc_phytype == WMPHY_I217) {
   14704 		uint16_t devid = sc->sc_pcidevid;
   14705 
   14706 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14707 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14708 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14709 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14710 		    (sc->sc_type >= WM_T_PCH_SPT))
   14711 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14712 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14713 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14714 
   14715 #if 0 /* notyet */
   14716 		if (sc->phy.acquire(sc) != 0)
   14717 			goto out;
   14718 
   14719 		/* XXX Do workaround for EEE */
   14720 
   14721 		/*
   14722 		 * For i217 Intel Rapid Start Technology support,
   14723 		 * when the system is going into Sx and no manageability engine
   14724 		 * is present, the driver must configure proxy to reset only on
   14725 		 * power good.	LPI (Low Power Idle) state must also reset only
   14726 		 * on power good, as well as the MTA (Multicast table array).
   14727 		 * The SMBus release must also be disabled on LCD reset.
   14728 		 */
   14729 
   14730 		/*
   14731 		 * Enable MTA to reset for Intel Rapid Start Technology
   14732 		 * Support
   14733 		 */
   14734 
   14735 		sc->phy.release(sc);
   14736 #endif
   14737 	}
   14738 #if 0
   14739 out:
   14740 #endif
   14741 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14742 
   14743 	if (sc->sc_type == WM_T_ICH8)
   14744 		wm_gig_downshift_workaround_ich8lan(sc);
   14745 
   14746 	if (sc->sc_type >= WM_T_PCH) {
   14747 		wm_oem_bits_config_ich8lan(sc, false);
   14748 
   14749 		/* Reset PHY to activate OEM bits on 82577/8 */
   14750 		if (sc->sc_type == WM_T_PCH)
   14751 			wm_reset_phy(sc);
   14752 
   14753 		if (sc->phy.acquire(sc) != 0)
   14754 			return;
   14755 		wm_write_smbus_addr(sc);
   14756 		sc->phy.release(sc);
   14757 	}
   14758 }
   14759 
   14760 /*
   14761  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14762  *  @sc: pointer to the HW structure
   14763  *
   14764  *  During Sx to S0 transitions on non-managed devices or managed devices
   14765  *  on which PHY resets are not blocked, if the PHY registers cannot be
   14766  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   14767  *  the PHY.
   14768  *  On i217, setup Intel Rapid Start Technology.
   14769  */
   14770 static int
   14771 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14772 {
   14773 	device_t dev = sc->sc_dev;
   14774 	int rv;
   14775 
   14776 	if (sc->sc_type < WM_T_PCH2)
   14777 		return 0;
   14778 
   14779 	rv = wm_init_phy_workarounds_pchlan(sc);
   14780 	if (rv != 0)
   14781 		return -1;
   14782 
   14783 	/* For i217 Intel Rapid Start Technology support when the system
   14784 	 * is transitioning from Sx and no manageability engine is present
   14785 	 * configure SMBus to restore on reset, disable proxy, and enable
   14786 	 * the reset on MTA (Multicast table array).
   14787 	 */
   14788 	if (sc->sc_phytype == WMPHY_I217) {
   14789 		uint16_t phy_reg;
   14790 
   14791 		if (sc->phy.acquire(sc) != 0)
   14792 			return -1;
   14793 
   14794 		/* Clear Auto Enable LPI after link up */
   14795 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14796 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14797 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14798 
   14799 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14800 			/* Restore clear on SMB if no manageability engine
   14801 			 * is present
   14802 			 */
   14803 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   14804 			    &phy_reg);
   14805 			if (rv != 0)
   14806 				goto release;
   14807 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14808 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14809 
   14810 			/* Disable Proxy */
   14811 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14812 		}
   14813 		/* Enable reset on MTA */
   14814 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14815 		if (rv != 0)
   14816 			goto release;
   14817 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14818 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14819 
   14820 release:
   14821 		sc->phy.release(sc);
   14822 		return rv;
   14823 	}
   14824 
   14825 	return 0;
   14826 }
   14827 
   14828 static void
   14829 wm_enable_wakeup(struct wm_softc *sc)
   14830 {
   14831 	uint32_t reg, pmreg;
   14832 	pcireg_t pmode;
   14833 	int rv = 0;
   14834 
   14835 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14836 		device_xname(sc->sc_dev), __func__));
   14837 
   14838 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14839 	    &pmreg, NULL) == 0)
   14840 		return;
   14841 
   14842 	if ((sc->sc_flags & WM_F_WOL) == 0)
   14843 		goto pme;
   14844 
   14845 	/* Advertise the wakeup capability */
   14846 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14847 	    | CTRL_SWDPIN(3));
   14848 
   14849 	/* Keep the laser running on fiber adapters */
   14850 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14851 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14852 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14853 		reg |= CTRL_EXT_SWDPIN(3);
   14854 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14855 	}
   14856 
   14857 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   14858 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   14859 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   14860 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   14861 		wm_suspend_workarounds_ich8lan(sc);
   14862 
   14863 #if 0	/* for the multicast packet */
   14864 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14865 	reg |= WUFC_MC;
   14866 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14867 #endif
   14868 
   14869 	if (sc->sc_type >= WM_T_PCH) {
   14870 		rv = wm_enable_phy_wakeup(sc);
   14871 		if (rv != 0)
   14872 			goto pme;
   14873 	} else {
   14874 		/* Enable wakeup by the MAC */
   14875 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   14876 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   14877 	}
   14878 
   14879 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14880 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14881 		|| (sc->sc_type == WM_T_PCH2))
   14882 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14883 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14884 
   14885 pme:
   14886 	/* Request PME */
   14887 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14888 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   14889 		/* For WOL */
   14890 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14891 	} else {
   14892 		/* Disable WOL */
   14893 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14894 	}
   14895 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14896 }
   14897 
   14898 /* Disable ASPM L0s and/or L1 for workaround */
   14899 static void
   14900 wm_disable_aspm(struct wm_softc *sc)
   14901 {
   14902 	pcireg_t reg, mask = 0;
   14903 	const char *str = "";
   14904 
   14905 	/*
   14906 	 * Only for PCIe devices which have the PCIe capability in the PCI
   14907 	 * config space.
   14908 	 */
   14909 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14910 		return;
   14911 
   14912 	switch (sc->sc_type) {
   14913 	case WM_T_82571:
   14914 	case WM_T_82572:
   14915 		/*
   14916 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14917 		 * State Power management L1 State (ASPM L1).
   14918 		 */
   14919 		mask = PCIE_LCSR_ASPM_L1;
   14920 		str = "L1 is";
   14921 		break;
   14922 	case WM_T_82573:
   14923 	case WM_T_82574:
   14924 	case WM_T_82583:
   14925 		/*
   14926 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14927 		 *
   14928 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
   14929 		 * some chipsets.  The documents for the 82574 and 82583 say
   14930 		 * that disabling L0s with some specific chipsets is
   14931 		 * sufficient, but we follow what the Intel em driver does.
   14932 		 *
   14933 		 * References:
   14934 		 * Errata 8 of the Specification Update of i82573.
   14935 		 * Errata 20 of the Specification Update of i82574.
   14936 		 * Errata 9 of the Specification Update of i82583.
   14937 		 */
   14938 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14939 		str = "L0s and L1 are";
   14940 		break;
   14941 	default:
   14942 		return;
   14943 	}
   14944 
   14945 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14946 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14947 	reg &= ~mask;
   14948 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14949 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14950 
   14951 	/* Print only in wm_attach() */
   14952 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14953 		aprint_verbose_dev(sc->sc_dev,
   14954 		    "ASPM %s disabled to workaround the errata.\n", str);
   14955 }
   14956 
   14957 /* LPLU */
   14958 
   14959 static void
   14960 wm_lplu_d0_disable(struct wm_softc *sc)
   14961 {
   14962 	struct mii_data *mii = &sc->sc_mii;
   14963 	uint32_t reg;
   14964 	uint16_t phyval;
   14965 
   14966 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14967 		device_xname(sc->sc_dev), __func__));
   14968 
   14969 	if (sc->sc_phytype == WMPHY_IFE)
   14970 		return;
   14971 
   14972 	switch (sc->sc_type) {
   14973 	case WM_T_82571:
   14974 	case WM_T_82572:
   14975 	case WM_T_82573:
   14976 	case WM_T_82575:
   14977 	case WM_T_82576:
   14978 		mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, &phyval);
   14979 		phyval &= ~PMR_D0_LPLU;
   14980 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, phyval);
   14981 		break;
   14982 	case WM_T_82580:
   14983 	case WM_T_I350:
   14984 	case WM_T_I210:
   14985 	case WM_T_I211:
   14986 		reg = CSR_READ(sc, WMREG_PHPM);
   14987 		reg &= ~PHPM_D0A_LPLU;
   14988 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14989 		break;
   14990 	case WM_T_82574:
   14991 	case WM_T_82583:
   14992 	case WM_T_ICH8:
   14993 	case WM_T_ICH9:
   14994 	case WM_T_ICH10:
   14995 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14996 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14997 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14998 		CSR_WRITE_FLUSH(sc);
   14999 		break;
   15000 	case WM_T_PCH:
   15001 	case WM_T_PCH2:
   15002 	case WM_T_PCH_LPT:
   15003 	case WM_T_PCH_SPT:
   15004 	case WM_T_PCH_CNP:
   15005 		wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS, &phyval);
   15006 		phyval &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   15007 		if (wm_phy_resetisblocked(sc) == false)
   15008 			phyval |= HV_OEM_BITS_ANEGNOW;
   15009 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, phyval);
   15010 		break;
   15011 	default:
   15012 		break;
   15013 	}
   15014 }
   15015 
   15016 /* EEE */
   15017 
   15018 static int
   15019 wm_set_eee_i350(struct wm_softc *sc)
   15020 {
   15021 	struct ethercom *ec = &sc->sc_ethercom;
   15022 	uint32_t ipcnfg, eeer;
   15023 	uint32_t ipcnfg_mask
   15024 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   15025 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   15026 
   15027 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   15028 	eeer = CSR_READ(sc, WMREG_EEER);
   15029 
   15030 	/* Enable or disable per user setting */
   15031 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15032 		ipcnfg |= ipcnfg_mask;
   15033 		eeer |= eeer_mask;
   15034 	} else {
   15035 		ipcnfg &= ~ipcnfg_mask;
   15036 		eeer &= ~eeer_mask;
   15037 	}
   15038 
   15039 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   15040 	CSR_WRITE(sc, WMREG_EEER, eeer);
   15041 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   15042 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   15043 
   15044 	return 0;
   15045 }
   15046 
   15047 static int
   15048 wm_set_eee_pchlan(struct wm_softc *sc)
   15049 {
   15050 	device_t dev = sc->sc_dev;
   15051 	struct ethercom *ec = &sc->sc_ethercom;
   15052 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15053 	int rv = 0;
   15054 
   15055 	switch (sc->sc_phytype) {
   15056 	case WMPHY_82579:
   15057 		lpa = I82579_EEE_LP_ABILITY;
   15058 		pcs_status = I82579_EEE_PCS_STATUS;
   15059 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15060 		break;
   15061 	case WMPHY_I217:
   15062 		lpa = I217_EEE_LP_ABILITY;
   15063 		pcs_status = I217_EEE_PCS_STATUS;
   15064 		adv_addr = I217_EEE_ADVERTISEMENT;
   15065 		break;
   15066 	default:
   15067 		return 0;
   15068 	}
   15069 
   15070 	if (sc->phy.acquire(sc)) {
   15071 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15072 		return 0;
   15073 	}
   15074 
   15075 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15076 	if (rv != 0)
   15077 		goto release;
   15078 
   15079 	/* Clear bits that enable EEE in various speeds */
   15080 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15081 
   15082 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15083 		/* Save off link partner's EEE ability */
   15084 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15085 		if (rv != 0)
   15086 			goto release;
   15087 
   15088 		/* Read EEE advertisement */
   15089 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15090 			goto release;
   15091 
   15092 		/*
   15093 		 * Enable EEE only for speeds in which the link partner is
   15094 		 * EEE capable and for which we advertise EEE.
   15095 		 */
   15096 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15097 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15098 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15099 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15100 			if ((data & ANLPAR_TX_FD) != 0)
   15101 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15102 			else {
   15103 				/*
   15104 				 * EEE is not supported in 100Half, so
   15105 				 * ignore the partner's EEE ability at
   15106 				 * 100Mbps if full-duplex is not advertised.
   15107 				 */
   15108 				sc->eee_lp_ability
   15109 				    &= ~AN_EEEADVERT_100_TX;
   15110 			}
   15111 		}
   15112 	}
   15113 
   15114 	if (sc->sc_phytype == WMPHY_82579) {
   15115 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15116 		if (rv != 0)
   15117 			goto release;
   15118 
   15119 		data &= ~I82579_LPI_PLL_SHUT_100;
   15120 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15121 	}
   15122 
   15123 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15124 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15125 		goto release;
   15126 
   15127 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15128 release:
   15129 	sc->phy.release(sc);
   15130 
   15131 	return rv;
   15132 }
   15133 
   15134 static int
   15135 wm_set_eee(struct wm_softc *sc)
   15136 {
   15137 	struct ethercom *ec = &sc->sc_ethercom;
   15138 
   15139 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15140 		return 0;
   15141 
   15142 	if (sc->sc_type == WM_T_I354) {
   15143 		/* I354 uses an external PHY */
   15144 		return 0; /* not yet */
   15145 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15146 		return wm_set_eee_i350(sc);
   15147 	else if (sc->sc_type >= WM_T_PCH2)
   15148 		return wm_set_eee_pchlan(sc);
   15149 
   15150 	return 0;
   15151 }
   15152 
   15153 /*
   15154  * Workarounds (mainly PHY related).
   15155  * Basically, PHY's workarounds are in the PHY drivers.
   15156  */
   15157 
   15158 /* Work-around for 82566 Kumeran PCS lock loss */
   15159 static int
   15160 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15161 {
   15162 	struct mii_data *mii = &sc->sc_mii;
   15163 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15164 	int i, reg, rv;
   15165 	uint16_t phyreg;
   15166 
   15167 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15168 		device_xname(sc->sc_dev), __func__));
   15169 
   15170 	/* If the link is not up, do nothing */
   15171 	if ((status & STATUS_LU) == 0)
   15172 		return 0;
   15173 
   15174 	/* Nothing to do if the link is other than 1Gbps */
   15175 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15176 		return 0;
   15177 
   15178 	for (i = 0; i < 10; i++) {
   15179 		/* Read twice */
   15180 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15181 		if (rv != 0)
   15182 			return rv;
   15183 		rv = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG, &phyreg);
   15184 		if (rv != 0)
   15185 			return rv;
   15186 
   15187 		if ((phyreg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15188 			goto out;	/* GOOD! */
   15189 
   15190 		/* Reset the PHY */
   15191 		wm_reset_phy(sc);
   15192 		delay(5*1000);
   15193 	}
   15194 
   15195 	/* Disable GigE link negotiation */
   15196 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15197 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15198 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15199 
   15200 	/*
   15201 	 * Call gig speed drop workaround on Gig disable before accessing
   15202 	 * any PHY registers.
   15203 	 */
   15204 	wm_gig_downshift_workaround_ich8lan(sc);
   15205 
   15206 out:
   15207 	return 0;
   15208 }
   15209 
   15210 /*
   15211  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15212  *  @sc: pointer to the HW structure
   15213  *
   15214  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15215  *  LPLU, Gig disable, MDIC PHY reset):
   15216  *    1) Set Kumeran Near-end loopback
   15217  *    2) Clear Kumeran Near-end loopback
   15218  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15219  */
   15220 static void
   15221 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15222 {
   15223 	uint16_t kmreg;
   15224 
   15225 	/* Only for igp3 */
   15226 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15227 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15228 			return;
   15229 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15230 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15231 			return;
   15232 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15233 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15234 	}
   15235 }
   15236 
   15237 /*
   15238  * Workaround for pch's PHYs
   15239  * XXX should be moved to new PHY driver?
   15240  */
   15241 static int
   15242 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15243 {
   15244 	int rv;
   15245 
   15246 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15247 		device_xname(sc->sc_dev), __func__));
   15248 	KASSERT(sc->sc_type == WM_T_PCH);
   15249 
   15250 	if (sc->sc_phytype == WMPHY_82577)
   15251 		if ((rv = wm_set_mdio_slow_mode_hv(sc)) != 0)
   15252 			return rv;
   15253 
   15254 	/* XXX (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   15255 
   15256 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   15257 
   15258 	/* 82578 */
   15259 	if (sc->sc_phytype == WMPHY_82578) {
   15260 		struct mii_softc *child;
   15261 
   15262 		/*
   15263 		 * Return registers to default by doing a soft reset then
   15264 		 * writing 0x3140 to the control register
   15265 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15266 		 */
   15267 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15268 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   15269 			PHY_RESET(child);
   15270 			rv = sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   15271 			    0x3140);
   15272 			if (rv != 0)
   15273 				return rv;
   15274 		}
   15275 	}
   15276 
   15277 	/* Select page 0 */
   15278 	if ((rv = sc->phy.acquire(sc)) != 0)
   15279 		return rv;
   15280 	rv = wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15281 	sc->phy.release(sc);
   15282 	if (rv != 0)
   15283 		return rv;
   15284 
   15285 	/*
   15286 	 * Configure the K1 Si workaround during phy reset assuming there is
   15287 	 * link so that it disables K1 if link is in 1Gbps.
   15288 	 */
   15289 	if ((rv = wm_k1_gig_workaround_hv(sc, 1)) != 0)
   15290 		return rv;
   15291 
   15292 	return rv;
   15293 }
   15294 
   15295 /*
   15296  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15297  *  @sc:   pointer to the HW structure
   15298  */
   15299 static void
   15300 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15301 {
   15302 	device_t dev = sc->sc_dev;
   15303 	uint32_t mac_reg;
   15304 	uint16_t i, wuce;
   15305 	int count;
   15306 
   15307 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15308 		device_xname(sc->sc_dev), __func__));
   15309 
   15310 	if (sc->phy.acquire(sc) != 0)
   15311 		return;
   15312 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15313 		goto release;
   15314 
   15315 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15316 	count = wm_rar_count(sc);
   15317 	for (i = 0; i < count; i++) {
   15318 		uint16_t lo, hi;
   15319 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15320 		lo = (uint16_t)(mac_reg & 0xffff);
   15321 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15322 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15323 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15324 
   15325 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15326 		lo = (uint16_t)(mac_reg & 0xffff);
   15327 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15328 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15329 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15330 	}
   15331 
   15332 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15333 
   15334 release:
   15335 	sc->phy.release(sc);
   15336 }
   15337 
   15338 /*
   15339  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15340  *  done after every PHY reset.
   15341  */
   15342 static int
   15343 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15344 {
   15345 	int rv;
   15346 
   15347 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15348 		device_xname(sc->sc_dev), __func__));
   15349 	KASSERT(sc->sc_type == WM_T_PCH2);
   15350 
   15351 	/* Set MDIO slow mode before any other MDIO access */
   15352 	rv = wm_set_mdio_slow_mode_hv(sc);
   15353 
   15354 	/* XXX set MSE higher to enable link to stay up when noise is high */
   15355 	/* XXX drop link after 5 times MSE threshold was reached */
   15356 
   15357 	return rv;
   15358 }
   15359 
   15360 /**
   15361  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15362  *  @link: link up bool flag
   15363  *
   15364  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   15365  *  preventing further DMA write requests.  Workaround the issue by disabling
   15366  *  the de-assertion of the clock request when in 1Gbps mode.
   15367  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15368  *  speeds in order to avoid Tx hangs.
   15369  **/
   15370 static int
   15371 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15372 {
   15373 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15374 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15375 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15376 	uint16_t phyreg;
   15377 
   15378 	if (link && (speed == STATUS_SPEED_1000)) {
   15379 		int rv = sc->phy.acquire(sc);
		if (rv != 0)
			return rv;
   15380 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15381 		    &phyreg);
   15382 		if (rv != 0)
   15383 			goto release;
   15384 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15385 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15386 		if (rv != 0)
   15387 			goto release;
   15388 		delay(20);
   15389 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15390 
   15391 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15392 		    &phyreg);
   15393 release:
   15394 		sc->phy.release(sc);
   15395 		return rv;
   15396 	}
   15397 
   15398 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15399 
   15400 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15401 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15402 	    || !link
   15403 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15404 		goto update_fextnvm6;
   15405 
   15406 	wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL, &phyreg);
   15407 
   15408 	/* Clear link status transmit timeout */
   15409 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15410 	if (speed == STATUS_SPEED_100) {
   15411 		/* Set inband Tx timeout to 5x10us for 100Half */
   15412 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15413 
   15414 		/* Do not extend the K1 entry latency for 100Half */
   15415 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15416 	} else {
   15417 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15418 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15419 
   15420 		/* Extend the K1 entry latency for 10 Mbps */
   15421 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15422 	}
   15423 
   15424 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15425 
   15426 update_fextnvm6:
   15427 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15428 	return 0;
   15429 }
   15430 
   15431 /*
   15432  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15433  *  @sc:   pointer to the HW structure
   15434  *  @link: link up bool flag
   15435  *
   15436  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   15437  *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15438  *  If link is down, the function will restore the default K1 setting located
   15439  *  in the NVM.
   15440  */
   15441 static int
   15442 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15443 {
   15444 	int k1_enable = sc->sc_nvm_k1_enabled;
   15445 
   15446 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15447 		device_xname(sc->sc_dev), __func__));
   15448 
   15449 	if (sc->phy.acquire(sc) != 0)
   15450 		return -1;
   15451 
   15452 	if (link) {
   15453 		k1_enable = 0;
   15454 
   15455 		/* Link stall fix for link up */
   15456 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15457 		    0x0100);
   15458 	} else {
   15459 		/* Link stall fix for link down */
   15460 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15461 		    0x4100);
   15462 	}
   15463 
   15464 	wm_configure_k1_ich8lan(sc, k1_enable);
   15465 	sc->phy.release(sc);
   15466 
   15467 	return 0;
   15468 }
   15469 
   15470 /*
   15471  *  wm_k1_workaround_lv - K1 Si workaround
   15472  *  @sc:   pointer to the HW structure
   15473  *
   15474  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
   15475  *  Disable K1 for 1000 and 100 speeds
   15476  */
   15477 static int
   15478 wm_k1_workaround_lv(struct wm_softc *sc)
   15479 {
   15480 	uint32_t reg;
   15481 	uint16_t phyreg;
   15482 	int rv;
   15483 
   15484 	if (sc->sc_type != WM_T_PCH2)
   15485 		return 0;
   15486 
   15487 	/* Set K1 beacon duration based on 10Mbps speed */
   15488 	rv = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS, &phyreg);
   15489 	if (rv != 0)
   15490 		return rv;
   15491 
   15492 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15493 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15494 		if (phyreg &
   15495 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   15496 			/* LV 1G/100 packet drop issue workaround */
   15497 			rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL,
   15498 			    &phyreg);
   15499 			if (rv != 0)
   15500 				return rv;
   15501 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15502 			rv = wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL,
   15503 			    phyreg);
   15504 			if (rv != 0)
   15505 				return rv;
   15506 		} else {
   15507 			/* For 10Mbps */
   15508 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15509 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15510 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15511 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15512 		}
   15513 	}
   15514 
   15515 	return 0;
   15516 }
   15517 
   15518 /*
   15519  *  wm_link_stall_workaround_hv - Si workaround
   15520  *  @sc: pointer to the HW structure
   15521  *
   15522  *  This function works around a Si bug where the link partner can get
   15523  *  a link up indication before the PHY does. If small packets are sent
   15524  *  by the link partner they can be placed in the packet buffer without
   15525  *  being properly accounted for by the PHY and will stall preventing
   15526  *  further packets from being received.  The workaround is to clear the
   15527  *  packet buffer after the PHY detects link up.
   15528  */
   15529 static int
   15530 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15531 {
   15532 	uint16_t phyreg;
   15533 
   15534 	if (sc->sc_phytype != WMPHY_82578)
   15535 		return 0;
   15536 
   15537 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
   15538 	wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR, &phyreg);
   15539 	if ((phyreg & BMCR_LOOP) != 0)
   15540 		return 0;
   15541 
   15542 	/* Check if the link is up and at 1Gbps */
   15543 	wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS, &phyreg);
   15544 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15545 	    | BM_CS_STATUS_SPEED_MASK;
   15546 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15547 		| BM_CS_STATUS_SPEED_1000))
   15548 		return 0;
   15549 
   15550 	delay(200 * 1000);	/* XXX too big */
   15551 
   15552 	/* Flush the packets in the FIFO buffer */
   15553 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15554 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15555 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15556 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15557 
   15558 	return 0;
   15559 }
   15560 
   15561 static int
   15562 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15563 {
   15564 	int rv;
   15565 	uint16_t reg;
   15566 
   15567 	rv = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL, &reg);
   15568 	if (rv != 0)
   15569 		return rv;
   15570 
   15571 	return wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15572 	    reg | HV_KMRN_MDIO_SLOW);
   15573 }
   15574 
   15575 /*
   15576  *  wm_configure_k1_ich8lan - Configure K1 power state
   15577  *  @sc: pointer to the HW structure
   15578  *  @enable: K1 state to configure
   15579  *
   15580  *  Configure the K1 power state based on the provided parameter.
   15581  *  Assumes semaphore already acquired.
   15582  */
   15583 static void
   15584 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15585 {
   15586 	uint32_t ctrl, ctrl_ext, tmp;
   15587 	uint16_t kmreg;
   15588 	int rv;
   15589 
   15590 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15591 
   15592 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15593 	if (rv != 0)
   15594 		return;
   15595 
   15596 	if (k1_enable)
   15597 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15598 	else
   15599 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15600 
   15601 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15602 	if (rv != 0)
   15603 		return;
   15604 
   15605 	delay(20);
   15606 
   15607 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15608 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15609 
   15610 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15611 	tmp |= CTRL_FRCSPD;
   15612 
   15613 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15614 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15615 	CSR_WRITE_FLUSH(sc);
   15616 	delay(20);
   15617 
   15618 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15619 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15620 	CSR_WRITE_FLUSH(sc);
   15621 	delay(20);
   15622 
   15623 	return;
   15624 }
   15625 
   15626 /* special case - for 82575 - need to do manual init ... */
   15627 static void
   15628 wm_reset_init_script_82575(struct wm_softc *sc)
   15629 {
   15630 	/*
   15631 	 * Remark: this is untested code - we have no board without EEPROM.
   15632 	 * The same setup as mentioned in the FreeBSD driver for the i82575.
   15633 	 */
   15634 
   15635 	/* SerDes configuration via SERDESCTRL */
   15636 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15637 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15638 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15639 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15640 
   15641 	/* CCM configuration via CCMCTL register */
   15642 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15643 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15644 
   15645 	/* PCIe lanes configuration */
   15646 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15647 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15648 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15649 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15650 
   15651 	/* PCIe PLL Configuration */
   15652 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15653 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15654 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15655 }
   15656 
   15657 static void
   15658 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15659 {
   15660 	uint32_t reg;
   15661 	uint16_t nvmword;
   15662 	int rv;
   15663 
   15664 	if (sc->sc_type != WM_T_82580)
   15665 		return;
   15666 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15667 		return;
   15668 
   15669 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15670 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15671 	if (rv != 0) {
   15672 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15673 		    __func__);
   15674 		return;
   15675 	}
   15676 
   15677 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15678 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15679 		reg |= MDICNFG_DEST;
   15680 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15681 		reg |= MDICNFG_COM_MDIO;
   15682 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15683 }
   15684 
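/*
 * A PHY ID register reading as all 0s or all 1s means the PHY is
 * absent or currently inaccessible.
 */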
   15685 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15686 
   15687 static bool
   15688 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15689 {
   15690 	uint32_t reg;
   15691 	uint16_t id1, id2;
   15692 	int i, rv;
   15693 
   15694 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15695 		device_xname(sc->sc_dev), __func__));
   15696 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15697 
   15698 	id1 = id2 = 0xffff;
   15699 	for (i = 0; i < 2; i++) {
   15700 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15701 		    &id1);
   15702 		if ((rv != 0) || MII_INVALIDID(id1))
   15703 			continue;
   15704 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15705 		    &id2);
   15706 		if ((rv != 0) || MII_INVALIDID(id2))
   15707 			continue;
   15708 		break;
   15709 	}
   15710 	if ((rv == 0) && !MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15711 		goto out;
   15712 
   15713 	/*
   15714 	 * In case the PHY needs to be in mdio slow mode,
   15715 	 * set slow mode and try to get the PHY id again.
   15716 	 */
   15717 	rv = 0;
   15718 	if (sc->sc_type < WM_T_PCH_LPT) {
   15719 		sc->phy.release(sc);
   15720 		wm_set_mdio_slow_mode_hv(sc);
   15721 		rv = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1, &id1);
   15722 		rv |= wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2, &id2);
   15723 		sc->phy.acquire(sc);
   15724 	}
   15725 	if ((rv != 0) || MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   15726 		device_printf(sc->sc_dev, "XXX return with false\n");
   15727 		return false;
   15728 	}
   15729 out:
   15730 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15731 		/* Only unforce SMBus if ME is not active */
   15732 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15733 			uint16_t phyreg;
   15734 
   15735 			/* Unforce SMBus mode in PHY */
   15736 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15737 			    CV_SMB_CTRL, &phyreg);
   15738 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15739 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15740 			    CV_SMB_CTRL, phyreg);
   15741 
   15742 			/* Unforce SMBus mode in MAC */
   15743 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15744 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15745 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15746 		}
   15747 	}
   15748 	return true;
   15749 }
   15750 
   15751 static void
   15752 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15753 {
   15754 	uint32_t reg;
   15755 	int i;
   15756 
   15757 	/* Set PHY Config Counter to 50msec */
   15758 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15759 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15760 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15761 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15762 
   15763 	/* Toggle LANPHYPC */
   15764 	reg = CSR_READ(sc, WMREG_CTRL);
   15765 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15766 	reg &= ~CTRL_LANPHYPC_VALUE;
   15767 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15768 	CSR_WRITE_FLUSH(sc);
   15769 	delay(1000);
   15770 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15771 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15772 	CSR_WRITE_FLUSH(sc);
   15773 
   15774 	if (sc->sc_type < WM_T_PCH_LPT)
   15775 		delay(50 * 1000);
   15776 	else {
   15777 		i = 20;
   15778 
   15779 		do {
   15780 			delay(5 * 1000);
   15781 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   15782 		    && i--);
   15783 
   15784 		delay(30 * 1000);
   15785 	}
   15786 }
   15787 
   15788 static int
   15789 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   15790 {
   15791 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   15792 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   15793 	uint32_t rxa;
   15794 	uint16_t scale = 0, lat_enc = 0;
   15795 	int32_t obff_hwm = 0;
   15796 	int64_t lat_ns, value;
   15797 
   15798 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15799 		device_xname(sc->sc_dev), __func__));
   15800 
   15801 	if (link) {
   15802 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   15803 		uint32_t status;
   15804 		uint16_t speed;
   15805 		pcireg_t preg;
   15806 
   15807 		status = CSR_READ(sc, WMREG_STATUS);
   15808 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   15809 		case STATUS_SPEED_10:
   15810 			speed = 10;
   15811 			break;
   15812 		case STATUS_SPEED_100:
   15813 			speed = 100;
   15814 			break;
   15815 		case STATUS_SPEED_1000:
   15816 			speed = 1000;
   15817 			break;
   15818 		default:
   15819 			device_printf(sc->sc_dev, "Unknown speed "
   15820 			    "(status = %08x)\n", status);
   15821 			return -1;
   15822 		}
   15823 
   15824 		/* Rx Packet Buffer Allocation size (KB) */
   15825 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   15826 
   15827 		/*
   15828 		 * Determine the maximum latency tolerated by the device.
   15829 		 *
   15830 		 * Per the PCIe spec, the tolerated latencies are encoded as
   15831 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   15832 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   15833 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   15834 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   15835 		 */
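		/*
		 * The computation below: rxa is in KB (hence * 1024),
		 * less two full frames, converted to bits (* 8); the
		 * "* 1000 / speed" step then turns bits at 'speed' Mb/s
		 * into nanoseconds to drain the Rx buffer.
		 */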
   15836 		lat_ns = ((int64_t)rxa * 1024 -
   15837 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   15838 			+ ETHER_HDR_LEN))) * 8 * 1000;
   15839 		if (lat_ns < 0)
   15840 			lat_ns = 0;
   15841 		else
   15842 			lat_ns /= speed;
   15843 		value = lat_ns;
   15844 
   15845 		while (value > LTRV_VALUE) {
   15846 			scale++;
   15847 			value = howmany(value, __BIT(5));
   15848 		}
   15849 		if (scale > LTRV_SCALE_MAX) {
   15850 			device_printf(sc->sc_dev,
   15851 			    "Invalid LTR latency scale %d\n", scale);
   15852 			return -1;
   15853 		}
   15854 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   15855 
   15856 		/* Determine the maximum latency tolerated by the platform */
   15857 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15858 		    WM_PCI_LTR_CAP_LPT);
   15859 		max_snoop = preg & 0xffff;
   15860 		max_nosnoop = preg >> 16;
   15861 
   15862 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   15863 
   15864 		if (lat_enc > max_ltr_enc) {
   15865 			lat_enc = max_ltr_enc;
   15866 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   15867 			    * PCI_LTR_SCALETONS(
   15868 				    __SHIFTOUT(lat_enc,
   15869 					PCI_LTR_MAXSNOOPLAT_SCALE));
   15870 		}
   15871 
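		/*
		 * Below, lat_ns * speed * 1000 / 8 / 10^9 converts the
		 * latency back into roughly the KB of Rx buffer consumed
		 * at wire speed during that time; what remains of rxa
		 * becomes the OBFF high water mark.
		 */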
   15872 		if (lat_ns) {
   15873 			lat_ns *= speed * 1000;
   15874 			lat_ns /= 8;
   15875 			lat_ns /= 1000000000;
   15876 			obff_hwm = (int32_t)(rxa - lat_ns);
   15877 		}
   15878 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   15879 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   15880 			    " (rxa = %d, lat_ns = %d)\n",
   15881 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   15882 			return -1;
   15883 		}
   15884 	}
   15885 	/* Snoop and No-Snoop latencies the same */
   15886 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   15887 	CSR_WRITE(sc, WMREG_LTRV, reg);
   15888 
   15889 	/* Set OBFF high water mark */
   15890 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   15891 	reg |= obff_hwm;
   15892 	CSR_WRITE(sc, WMREG_SVT, reg);
   15893 
   15894 	/* Enable OBFF */
   15895 	reg = CSR_READ(sc, WMREG_SVCR);
   15896 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   15897 	CSR_WRITE(sc, WMREG_SVCR, reg);
   15898 
   15899 	return 0;
   15900 }
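
/*
 * A minimal standalone sketch of the LTR encoding done in
 * wm_platform_pm_pch_lpt() above, assuming only the standard PCIe LTR
 * layout already described there (10-bit value, 3-bit scale at bits
 * 12:10, scale multiplying the value by 2^(5 * scale)).  Illustration
 * only; it is not compiled into the driver and omits the scale > 5
 * validity check performed above.
 */
#if 0
static uint16_t
wm_ltr_encode_example(uint64_t lat_ns)
{
	uint16_t scale = 0;

	while (lat_ns > 0x3ff) {		/* 10-bit value field */
		scale++;
		lat_ns = (lat_ns + 31) / 32;	/* howmany(lat_ns, 2^5) */
	}
	/* e.g. 300000 ns -> scale 2, value 293, i.e. 293 * 2^10 ns */
	return (uint16_t)((scale << 10) | lat_ns);
}
#endif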
   15901 
   15902 /*
   15903  * I210 Errata 25 and I211 Errata 10
   15904  * Slow System Clock.
   15905  */
   15906 static int
   15907 wm_pll_workaround_i210(struct wm_softc *sc)
   15908 {
   15909 	uint32_t mdicnfg, wuc;
   15910 	uint32_t reg;
   15911 	pcireg_t pcireg;
   15912 	uint32_t pmreg;
   15913 	uint16_t nvmword, tmp_nvmword;
   15914 	uint16_t phyval;
   15915 	bool wa_done = false;
   15916 	int i, rv = 0;
   15917 
   15918 	/* Get Power Management cap offset */
   15919 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15920 	    &pmreg, NULL) == 0)
   15921 		return -1;
   15922 
   15923 	/* Save WUC and MDICNFG registers */
   15924 	wuc = CSR_READ(sc, WMREG_WUC);
   15925 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   15926 
   15927 	reg = mdicnfg & ~MDICNFG_DEST;
   15928 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15929 
   15930 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   15931 		nvmword = INVM_DEFAULT_AL;
   15932 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   15933 
   15934 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   15935 		wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   15936 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG, &phyval);
   15937 
   15938 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   15939 			rv = 0;
   15940 			break; /* OK */
   15941 		} else
   15942 			rv = -1;
   15943 
   15944 		wa_done = true;
   15945 		/* Directly reset the internal PHY */
   15946 		reg = CSR_READ(sc, WMREG_CTRL);
   15947 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   15948 
   15949 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15950 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   15951 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15952 
   15953 		CSR_WRITE(sc, WMREG_WUC, 0);
   15954 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   15955 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15956 
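		/*
		 * Bounce the device through D3hot and back to D0;
		 * apparently this makes the patched autoload word written
		 * above take effect when the PHY configuration reloads.
		 */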
   15957 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15958 		    pmreg + PCI_PMCSR);
   15959 		pcireg |= PCI_PMCSR_STATE_D3;
   15960 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15961 		    pmreg + PCI_PMCSR, pcireg);
   15962 		delay(1000);
   15963 		pcireg &= ~PCI_PMCSR_STATE_D3;
   15964 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15965 		    pmreg + PCI_PMCSR, pcireg);
   15966 
   15967 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   15968 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15969 
   15970 		/* Restore WUC register */
   15971 		CSR_WRITE(sc, WMREG_WUC, wuc);
   15972 	}
   15973 
   15974 	/* Restore MDICNFG setting */
   15975 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   15976 	if (wa_done)
   15977 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   15978 	return rv;
   15979 }
   15980 
   15981 static void
   15982 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   15983 {
   15984 	uint32_t reg;
   15985 
   15986 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15987 		device_xname(sc->sc_dev), __func__));
   15988 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   15989 	    || (sc->sc_type == WM_T_PCH_CNP));
   15990 
   15991 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15992 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   15993 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15994 
   15995 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   15996 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   15997 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   15998 }
   15999