/*	$NetBSD: if_wm.c,v 1.616 2019/01/11 05:13:26 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.616 2019/01/11 05:13:26 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/mdio.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
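
/*
 * Illustrative usage sketch (not driver code; assumes a softc "sc" in
 * scope): DPRINTF() takes the printf arguments as one parenthesized
 * macro parameter, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
 *	    device_xname(sc->sc_dev)));
 *
 * The inner parentheses let a variable-length argument list pass
 * through the two-parameter macro above.
 */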

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts that this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames. If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce it.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

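/*
 * Illustrative sketch (not driver code): because txq_ndesc and txq_num
 * are powers of two, the macros above advance ring indexes with a mask
 * instead of a modulo.  With WM_NTXDESC(txq) == 4096:
 *
 *	WM_NEXTTX(txq, 100)  == (100 + 1) & 4095 == 101
 *	WM_NEXTTX(txq, 4095) == (4095 + 1) & 4095 == 0
 *
 * i.e. the index wraps back to the start of the ring for free.
 */
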
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

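/*
 * Worked example (illustrative; the exact jumbo frame size depends on
 * configuration): a 9018-byte frame needs howmany(9018, MCLBYTES) == 5
 * buffers, and WM_NRXDESC / 5 == 51 such chains fit in the ring, which
 * is where the "room for 50 jumbo packets" figure above comes from.
 */
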
#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
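
/*
 * Illustrative expansion (not driver code): WM_Q_EVCNT_DEFINE(txq, txdw)
 * inside a queue structure produces roughly
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * The string literal in sizeof() is used only for its length (room for
 * the queue name, a two-digit queue number and the event name);
 * WM_Q_EVCNT_ATTACH() later fills the buffer with snprintf(), giving
 * e.g. "txq00txdw" for queue 0.
 */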

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

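/*
 * Illustrative usage sketch (not driver code): PHY and NVM accesses go
 * through these per-chip operation tables, so a locked PHY read is
 * typically bracketed as
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		rv = sc->phy.readreg_locked(dev, phy, reg, &val);
 *		sc->phy.release(sc);
 *	}
 *
 * which keeps the chip-specific semaphore handling out of the callers.
 */
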
/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_ec_capenable;		/* last ec_capenable */
	int sc_flowflags;		/* 802.3x flow control flags */
	uint16_t eee_lp_ability;	/* EEE link partner's ability */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
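
/*
 * Illustrative sketch (not driver code): rxq_tailp always points at the
 * m_next field that will receive the next fragment, so appending never
 * walks the chain.  Linking mbufs m1 and m2 after a reset:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1, rxq_tail == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, rxq_tail == m2
 */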

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

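/*
 * Illustrative usage sketch (not driver code; register and delay values
 * are examples only): CSR_WRITE_FLUSH() pushes posted writes out to the
 * chip by issuing a harmless read, so a "write, flush, then wait"
 * sequence looks like
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */
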
#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

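/*
 * Illustrative sketch (not driver code): a descriptor ring's base
 * address is programmed into the chip as two 32-bit halves.  For a
 * ring whose 64-bit DMA address is 0x123456789:
 *
 *	WM_CDTXADDR_LO(txq, 0) == 0x23456789
 *	WM_CDTXADDR_HI(txq, 0) == 0x1
 *
 * On a 32-bit bus_addr_t the high half is always 0.
 */
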
/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static int	wm_rar_count(struct wm_softc *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_enable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_disable_phy_wakeup_reg_access_bm(device_t, uint16_t *);
static int	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int,
	bool);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* EMI register related */
static int	wm_access_emi_reg_locked(device_t, int, uint16_t *, bool);
static int	wm_read_emi_reg_locked(device_t, int, uint16_t *);
static int	wm_write_emi_reg_locked(device_t, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static int	wm_sgmii_readreg_locked(device_t, int, int, uint16_t *);
static void	wm_sgmii_writereg(device_t, int, int, int);
static int	wm_sgmii_writereg_locked(device_t, int, int, uint16_t);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static int	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static int	wm_set_eee_i350(struct wm_softc *);
static int	wm_set_eee_pchlan(struct wm_softc *);
static int	wm_set_eee(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workarounds_ich8lan(struct wm_softc *);
static void	wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workarounds_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1325 	  "82567LF-3 LAN Controller",
   1326 	  WM_T_ICH10,		WMP_F_COPPER },
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1328 	  "82567V-2 LAN Controller",
   1329 	  WM_T_ICH10,		WMP_F_COPPER },
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1331 	  "82567V-3? LAN Controller",
   1332 	  WM_T_ICH10,		WMP_F_COPPER },
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1334 	  "HANKSVILLE LAN Controller",
   1335 	  WM_T_ICH10,		WMP_F_COPPER },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1337 	  "PCH LAN (82577LM) Controller",
   1338 	  WM_T_PCH,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1340 	  "PCH LAN (82577LC) Controller",
   1341 	  WM_T_PCH,		WMP_F_COPPER },
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1343 	  "PCH LAN (82578DM) Controller",
   1344 	  WM_T_PCH,		WMP_F_COPPER },
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1346 	  "PCH LAN (82578DC) Controller",
   1347 	  WM_T_PCH,		WMP_F_COPPER },
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1349 	  "PCH2 LAN (82579LM) Controller",
   1350 	  WM_T_PCH2,		WMP_F_COPPER },
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1352 	  "PCH2 LAN (82579V) Controller",
   1353 	  WM_T_PCH2,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1355 	  "82575EB dual-1000baseT Ethernet",
   1356 	  WM_T_82575,		WMP_F_COPPER },
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1358 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1359 	  WM_T_82575,		WMP_F_SERDES },
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1361 	  "82575GB quad-1000baseT Ethernet",
   1362 	  WM_T_82575,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1364 	  "82575GB quad-1000baseT Ethernet (PM)",
   1365 	  WM_T_82575,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1367 	  "82576 1000BaseT Ethernet",
   1368 	  WM_T_82576,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1370 	  "82576 1000BaseX Ethernet",
   1371 	  WM_T_82576,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1374 	  "82576 gigabit Ethernet (SERDES)",
   1375 	  WM_T_82576,		WMP_F_SERDES },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1378 	  "82576 quad-1000BaseT Ethernet",
   1379 	  WM_T_82576,		WMP_F_COPPER },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1382 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1383 	  WM_T_82576,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1386 	  "82576 gigabit Ethernet",
   1387 	  WM_T_82576,		WMP_F_COPPER },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1390 	  "82576 gigabit Ethernet (SERDES)",
   1391 	  WM_T_82576,		WMP_F_SERDES },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1393 	  "82576 quad-gigabit Ethernet (SERDES)",
   1394 	  WM_T_82576,		WMP_F_SERDES },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1397 	  "82580 1000BaseT Ethernet",
   1398 	  WM_T_82580,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1400 	  "82580 1000BaseX Ethernet",
   1401 	  WM_T_82580,		WMP_F_FIBER },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1404 	  "82580 1000BaseT Ethernet (SERDES)",
   1405 	  WM_T_82580,		WMP_F_SERDES },
   1406 
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1408 	  "82580 gigabit Ethernet (SGMII)",
   1409 	  WM_T_82580,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1411 	  "82580 dual-1000BaseT Ethernet",
   1412 	  WM_T_82580,		WMP_F_COPPER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1415 	  "82580 quad-1000BaseX Ethernet",
   1416 	  WM_T_82580,		WMP_F_FIBER },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1419 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1420 	  WM_T_82580,		WMP_F_COPPER },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1423 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1424 	  WM_T_82580,		WMP_F_SERDES },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1427 	  "DH89XXCC 1000BASE-KX Ethernet",
   1428 	  WM_T_82580,		WMP_F_SERDES },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1431 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1432 	  WM_T_82580,		WMP_F_SERDES },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1435 	  "I350 Gigabit Network Connection",
   1436 	  WM_T_I350,		WMP_F_COPPER },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1439 	  "I350 Gigabit Fiber Network Connection",
   1440 	  WM_T_I350,		WMP_F_FIBER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1443 	  "I350 Gigabit Backplane Connection",
   1444 	  WM_T_I350,		WMP_F_SERDES },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1447 	  "I350 Quad Port Gigabit Ethernet",
   1448 	  WM_T_I350,		WMP_F_SERDES },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1451 	  "I350 Gigabit Connection",
   1452 	  WM_T_I350,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1455 	  "I354 Gigabit Ethernet (KX)",
   1456 	  WM_T_I354,		WMP_F_SERDES },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1459 	  "I354 Gigabit Ethernet (SGMII)",
   1460 	  WM_T_I354,		WMP_F_COPPER },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1463 	  "I354 Gigabit Ethernet (2.5G)",
   1464 	  WM_T_I354,		WMP_F_COPPER },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1467 	  "I210-T1 Ethernet Server Adapter",
   1468 	  WM_T_I210,		WMP_F_COPPER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1471 	  "I210 Ethernet (Copper OEM)",
   1472 	  WM_T_I210,		WMP_F_COPPER },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1475 	  "I210 Ethernet (Copper IT)",
   1476 	  WM_T_I210,		WMP_F_COPPER },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1479 	  "I210 Ethernet (FLASH less)",
   1480 	  WM_T_I210,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1483 	  "I210 Gigabit Ethernet (Fiber)",
   1484 	  WM_T_I210,		WMP_F_FIBER },
   1485 
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1487 	  "I210 Gigabit Ethernet (SERDES)",
   1488 	  WM_T_I210,		WMP_F_SERDES },
   1489 
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1491 	  "I210 Gigabit Ethernet (FLASH less)",
   1492 	  WM_T_I210,		WMP_F_SERDES },
   1493 
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1495 	  "I210 Gigabit Ethernet (SGMII)",
   1496 	  WM_T_I210,		WMP_F_COPPER },
   1497 
   1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1499 	  "I211 Ethernet (COPPER)",
   1500 	  WM_T_I211,		WMP_F_COPPER },
   1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1502 	  "I217 V Ethernet Connection",
   1503 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1505 	  "I217 LM Ethernet Connection",
   1506 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1508 	  "I218 V Ethernet Connection",
   1509 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1511 	  "I218 V Ethernet Connection",
   1512 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1514 	  "I218 V Ethernet Connection",
   1515 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1517 	  "I218 LM Ethernet Connection",
   1518 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1520 	  "I218 LM Ethernet Connection",
   1521 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1523 	  "I218 LM Ethernet Connection",
   1524 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1526 	  "I219 V Ethernet Connection",
   1527 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1529 	  "I219 V Ethernet Connection",
   1530 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1531 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1532 	  "I219 V Ethernet Connection",
   1533 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1534 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1535 	  "I219 V Ethernet Connection",
   1536 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1537 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1538 	  "I219 LM Ethernet Connection",
   1539 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1540 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1541 	  "I219 LM Ethernet Connection",
   1542 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1543 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1544 	  "I219 LM Ethernet Connection",
   1545 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1546 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1547 	  "I219 LM Ethernet Connection",
   1548 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1549 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1550 	  "I219 LM Ethernet Connection",
   1551 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1552 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1553 	  "I219 V Ethernet Connection",
   1554 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1555 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1556 	  "I219 V Ethernet Connection",
   1557 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1558 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1559 	  "I219 LM Ethernet Connection",
   1560 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1561 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1562 	  "I219 LM Ethernet Connection",
   1563 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1564 	{ 0,			0,
   1565 	  NULL,
   1566 	  0,			0 },
   1567 };
   1568 
   1569 /*
   1570  * Register read/write functions.
   1571  * Other than CSR_{READ|WRITE}().
   1572  */
   1573 
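         /*
          * Both helpers below use the I/O-space window as an address/data
          * pair: the register offset is written at offset 0 (IOADDR) and
          * the value is then read or written at offset 4 (IODATA).
          */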
   1574 #if 0 /* Not currently used */
   1575 static inline uint32_t
   1576 wm_io_read(struct wm_softc *sc, int reg)
   1577 {
   1578 
   1579 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1580 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1581 }
   1582 #endif
   1583 
   1584 static inline void
   1585 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1586 {
   1587 
   1588 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1589 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1590 }
   1591 
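         /*
          * Write an 8-bit value to an indexed 82575 controller register
          * (e.g. through SCTL) and poll, for at most SCTL_CTL_POLL_TIMEOUT
          * iterations of 5us each, until the controller reports
          * SCTL_CTL_READY.
          */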
   1592 static inline void
   1593 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1594     uint32_t data)
   1595 {
   1596 	uint32_t regval;
   1597 	int i;
   1598 
   1599 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1600 
   1601 	CSR_WRITE(sc, reg, regval);
   1602 
   1603 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1604 		delay(5);
   1605 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1606 			break;
   1607 	}
   1608 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1609 		aprint_error("%s: WARNING:"
   1610 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1611 		    device_xname(sc->sc_dev), reg);
   1612 	}
   1613 }
   1614 
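         /*
          * Store a DMA address into a two-word descriptor address field.
          * The descriptor is little-endian (hence the htole32()); the
          * sizeof test folds at compile time, so 32-bit bus_addr_t
          * platforms simply store 0 in the high word.
          */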
   1615 static inline void
   1616 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1617 {
   1618 	wa->wa_low = htole32(v & 0xffffffffU);
   1619 	if (sizeof(bus_addr_t) == 8)
   1620 		wa->wa_high = htole32((uint64_t) v >> 32);
   1621 	else
   1622 		wa->wa_high = 0;
   1623 }
   1624 
   1625 /*
   1626  * Descriptor sync/init functions.
   1627  */
   1628 static inline void
   1629 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1630 {
   1631 	struct wm_softc *sc = txq->txq_sc;
   1632 
   1633 	/* If it will wrap around, sync to the end of the ring. */
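         	/*
         	 * E.g. on a 256-descriptor ring, syncing 8 descriptors
         	 * starting at index 252 becomes two syncs: 252-255 here and
         	 * 0-3 below.
         	 */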
   1634 	if ((start + num) > WM_NTXDESC(txq)) {
   1635 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1636 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1637 		    (WM_NTXDESC(txq) - start), ops);
   1638 		num -= (WM_NTXDESC(txq) - start);
   1639 		start = 0;
   1640 	}
   1641 
   1642 	/* Now sync whatever is left. */
   1643 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1644 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1645 }
   1646 
   1647 static inline void
   1648 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1649 {
   1650 	struct wm_softc *sc = rxq->rxq_sc;
   1651 
   1652 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1653 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1654 }
   1655 
   1656 static inline void
   1657 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1658 {
   1659 	struct wm_softc *sc = rxq->rxq_sc;
   1660 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1661 	struct mbuf *m = rxs->rxs_mbuf;
   1662 
   1663 	/*
   1664 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1665 	 * so that the payload after the Ethernet header is aligned
   1666 	 * to a 4-byte boundary.
    1667 	 *
   1668 	 * XXX BRAINDAMAGE ALERT!
   1669 	 * The stupid chip uses the same size for every buffer, which
   1670 	 * is set in the Receive Control register.  We are using the 2K
   1671 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1672 	 * reason, we can't "scoot" packets longer than the standard
   1673 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1674 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1675 	 * the upper layer copy the headers.
   1676 	 */
   1677 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1678 
   1679 	if (sc->sc_type == WM_T_82574) {
   1680 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1681 		rxd->erx_data.erxd_addr =
   1682 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1683 		rxd->erx_data.erxd_dd = 0;
   1684 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1685 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1686 
   1687 		rxd->nqrx_data.nrxd_paddr =
   1688 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1689 		/* Currently, split header is not supported. */
   1690 		rxd->nqrx_data.nrxd_haddr = 0;
   1691 	} else {
   1692 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1693 
   1694 		wm_set_dma_addr(&rxd->wrx_addr,
   1695 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1696 		rxd->wrx_len = 0;
   1697 		rxd->wrx_cksum = 0;
   1698 		rxd->wrx_status = 0;
   1699 		rxd->wrx_errors = 0;
   1700 		rxd->wrx_special = 0;
   1701 	}
   1702 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1703 
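         	/*
         	 * Writing the ring index to the RDT (receive descriptor tail)
         	 * register hands the re-initialized descriptor back to the
         	 * hardware.
         	 */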
   1704 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1705 }
   1706 
   1707 /*
   1708  * Device driver interface functions and commonly used functions.
   1709  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1710  */
   1711 
   1712 /* Lookup supported device table */
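         /*
          * Note: wm_products is terminated by the all-zero sentinel entry,
          * which the wmp_name != NULL loop test below relies on.
          */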
   1713 static const struct wm_product *
   1714 wm_lookup(const struct pci_attach_args *pa)
   1715 {
   1716 	const struct wm_product *wmp;
   1717 
   1718 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1719 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1720 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1721 			return wmp;
   1722 	}
   1723 	return NULL;
   1724 }
   1725 
   1726 /* The match function (ca_match) */
   1727 static int
   1728 wm_match(device_t parent, cfdata_t cf, void *aux)
   1729 {
   1730 	struct pci_attach_args *pa = aux;
   1731 
   1732 	if (wm_lookup(pa) != NULL)
   1733 		return 1;
   1734 
   1735 	return 0;
   1736 }
   1737 
   1738 /* The attach function (ca_attach) */
   1739 static void
   1740 wm_attach(device_t parent, device_t self, void *aux)
   1741 {
   1742 	struct wm_softc *sc = device_private(self);
   1743 	struct pci_attach_args *pa = aux;
   1744 	prop_dictionary_t dict;
   1745 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1746 	pci_chipset_tag_t pc = pa->pa_pc;
   1747 	int counts[PCI_INTR_TYPE_SIZE];
   1748 	pci_intr_type_t max_type;
   1749 	const char *eetype, *xname;
   1750 	bus_space_tag_t memt;
   1751 	bus_space_handle_t memh;
   1752 	bus_size_t memsize;
   1753 	int memh_valid;
   1754 	int i, error;
   1755 	const struct wm_product *wmp;
   1756 	prop_data_t ea;
   1757 	prop_number_t pn;
   1758 	uint8_t enaddr[ETHER_ADDR_LEN];
   1759 	char buf[256];
   1760 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1761 	pcireg_t preg, memtype;
   1762 	uint16_t eeprom_data, apme_mask;
   1763 	bool force_clear_smbi;
   1764 	uint32_t link_mode;
   1765 	uint32_t reg;
   1766 
   1767 	sc->sc_dev = self;
   1768 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1769 	sc->sc_core_stopping = false;
   1770 
   1771 	wmp = wm_lookup(pa);
   1772 #ifdef DIAGNOSTIC
   1773 	if (wmp == NULL) {
   1774 		printf("\n");
   1775 		panic("wm_attach: impossible");
   1776 	}
   1777 #endif
   1778 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1779 
   1780 	sc->sc_pc = pa->pa_pc;
   1781 	sc->sc_pcitag = pa->pa_tag;
   1782 
   1783 	if (pci_dma64_available(pa))
   1784 		sc->sc_dmat = pa->pa_dmat64;
   1785 	else
   1786 		sc->sc_dmat = pa->pa_dmat;
   1787 
   1788 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
    1789 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1790 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1791 
   1792 	sc->sc_type = wmp->wmp_type;
   1793 
   1794 	/* Set default function pointers */
   1795 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1796 	sc->phy.release = sc->nvm.release = wm_put_null;
   1797 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1798 
   1799 	if (sc->sc_type < WM_T_82543) {
   1800 		if (sc->sc_rev < 2) {
   1801 			aprint_error_dev(sc->sc_dev,
   1802 			    "i82542 must be at least rev. 2\n");
   1803 			return;
   1804 		}
   1805 		if (sc->sc_rev < 3)
   1806 			sc->sc_type = WM_T_82542_2_0;
   1807 	}
   1808 
   1809 	/*
   1810 	 * Disable MSI for Errata:
   1811 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1812 	 *
   1813 	 *  82544: Errata 25
   1814 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1815 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1816 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1817 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1818 	 *
   1819 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1820 	 *
   1821 	 *  82571 & 82572: Errata 63
   1822 	 */
   1823 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1824 	    || (sc->sc_type == WM_T_82572))
   1825 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1826 
   1827 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1828 	    || (sc->sc_type == WM_T_82580)
   1829 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1830 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1831 		sc->sc_flags |= WM_F_NEWQUEUE;
   1832 
   1833 	/* Set device properties (mactype) */
   1834 	dict = device_properties(sc->sc_dev);
   1835 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1836 
   1837 	/*
    1838 	 * Map the device.  All devices support memory-mapped access,
   1839 	 * and it is really required for normal operation.
   1840 	 */
   1841 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1842 	switch (memtype) {
   1843 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1844 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1845 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1846 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1847 		break;
   1848 	default:
   1849 		memh_valid = 0;
   1850 		break;
   1851 	}
   1852 
   1853 	if (memh_valid) {
   1854 		sc->sc_st = memt;
   1855 		sc->sc_sh = memh;
   1856 		sc->sc_ss = memsize;
   1857 	} else {
   1858 		aprint_error_dev(sc->sc_dev,
   1859 		    "unable to map device registers\n");
   1860 		return;
   1861 	}
   1862 
   1863 	/*
   1864 	 * In addition, i82544 and later support I/O mapped indirect
   1865 	 * register access.  It is not desirable (nor supported in
   1866 	 * this driver) to use it for normal operation, though it is
   1867 	 * required to work around bugs in some chip versions.
   1868 	 */
   1869 	if (sc->sc_type >= WM_T_82544) {
   1870 		/* First we have to find the I/O BAR. */
   1871 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1872 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1873 			if (memtype == PCI_MAPREG_TYPE_IO)
   1874 				break;
   1875 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1876 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1877 				i += 4;	/* skip high bits, too */
   1878 		}
   1879 		if (i < PCI_MAPREG_END) {
   1880 			/*
    1881 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1882 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1883 			 * That's no problem because newer chips don't have
    1884 			 * this bug.
    1885 			 *
    1886 			 * The i8254x apparently doesn't respond when the
    1887 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1888 			 * been configured.
   1889 			 */
   1890 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1891 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1892 				aprint_error_dev(sc->sc_dev,
   1893 				    "WARNING: I/O BAR at zero.\n");
   1894 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1895 					0, &sc->sc_iot, &sc->sc_ioh,
   1896 					NULL, &sc->sc_ios) == 0) {
   1897 				sc->sc_flags |= WM_F_IOH_VALID;
   1898 			} else
   1899 				aprint_error_dev(sc->sc_dev,
   1900 				    "WARNING: unable to map I/O space\n");
   1901 		}
   1902 
   1903 	}
   1904 
   1905 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1906 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1907 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1908 	if (sc->sc_type < WM_T_82542_2_1)
   1909 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1910 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1911 
   1912 	/* power up chip */
   1913 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1914 	    && error != EOPNOTSUPP) {
   1915 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1916 		return;
   1917 	}
   1918 
   1919 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1920 	/*
    1921 	 * To save interrupt resources, don't use MSI-X if we can use
    1922 	 * only one queue.
   1923 	 */
   1924 	if (sc->sc_nqueues > 1) {
   1925 		max_type = PCI_INTR_TYPE_MSIX;
   1926 		/*
    1927 		 * The 82583 has an MSI-X capability in the PCI configuration
    1928 		 * space, but it doesn't support it. At least the documentation
    1929 		 * doesn't say anything about MSI-X.
   1930 		 */
   1931 		counts[PCI_INTR_TYPE_MSIX]
   1932 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1933 	} else {
   1934 		max_type = PCI_INTR_TYPE_MSI;
   1935 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1936 	}
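         	/*
         	 * The MSI-X count above is sc_nqueues + 1: one vector per
         	 * Tx/Rx queue pair plus one vector for link status changes.
         	 */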
   1937 
   1938 	/* Allocation settings */
   1939 	counts[PCI_INTR_TYPE_MSI] = 1;
   1940 	counts[PCI_INTR_TYPE_INTX] = 1;
   1941 	/* overridden by disable flags */
   1942 	if (wm_disable_msi != 0) {
   1943 		counts[PCI_INTR_TYPE_MSI] = 0;
   1944 		if (wm_disable_msix != 0) {
   1945 			max_type = PCI_INTR_TYPE_INTX;
   1946 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1947 		}
   1948 	} else if (wm_disable_msix != 0) {
   1949 		max_type = PCI_INTR_TYPE_MSI;
   1950 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1951 	}
   1952 
   1953 alloc_retry:
   1954 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1955 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1956 		return;
   1957 	}
   1958 
   1959 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1960 		error = wm_setup_msix(sc);
   1961 		if (error) {
   1962 			pci_intr_release(pc, sc->sc_intrs,
   1963 			    counts[PCI_INTR_TYPE_MSIX]);
   1964 
   1965 			/* Setup for MSI: Disable MSI-X */
   1966 			max_type = PCI_INTR_TYPE_MSI;
   1967 			counts[PCI_INTR_TYPE_MSI] = 1;
   1968 			counts[PCI_INTR_TYPE_INTX] = 1;
   1969 			goto alloc_retry;
   1970 		}
   1971 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1972 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1973 		error = wm_setup_legacy(sc);
   1974 		if (error) {
   1975 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1976 			    counts[PCI_INTR_TYPE_MSI]);
   1977 
   1978 			/* The next try is for INTx: Disable MSI */
   1979 			max_type = PCI_INTR_TYPE_INTX;
   1980 			counts[PCI_INTR_TYPE_INTX] = 1;
   1981 			goto alloc_retry;
   1982 		}
   1983 	} else {
   1984 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1985 		error = wm_setup_legacy(sc);
   1986 		if (error) {
   1987 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1988 			    counts[PCI_INTR_TYPE_INTX]);
   1989 			return;
   1990 		}
   1991 	}
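         	/*
         	 * To summarize the retry logic above: allocation falls back
         	 * from MSI-X to MSI to INTx, releasing the previous vectors
         	 * at each step, and attach fails only if plain INTx setup
         	 * also fails.
         	 */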
   1992 
   1993 	/*
   1994 	 * Check the function ID (unit number of the chip).
   1995 	 */
   1996 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1997 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1998 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1999 	    || (sc->sc_type == WM_T_82580)
   2000 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   2001 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   2002 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   2003 	else
   2004 		sc->sc_funcid = 0;
   2005 
   2006 	/*
   2007 	 * Determine a few things about the bus we're connected to.
   2008 	 */
   2009 	if (sc->sc_type < WM_T_82543) {
   2010 		/* We don't really know the bus characteristics here. */
   2011 		sc->sc_bus_speed = 33;
   2012 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   2013 		/*
    2014 		 * CSA (Communication Streaming Architecture) is about as fast
    2015 		 * as a 32-bit 66MHz PCI bus.
   2016 		 */
   2017 		sc->sc_flags |= WM_F_CSA;
   2018 		sc->sc_bus_speed = 66;
   2019 		aprint_verbose_dev(sc->sc_dev,
   2020 		    "Communication Streaming Architecture\n");
   2021 		if (sc->sc_type == WM_T_82547) {
   2022 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2023 			callout_setfunc(&sc->sc_txfifo_ch,
   2024 			    wm_82547_txfifo_stall, sc);
   2025 			aprint_verbose_dev(sc->sc_dev,
   2026 			    "using 82547 Tx FIFO stall work-around\n");
   2027 		}
   2028 	} else if (sc->sc_type >= WM_T_82571) {
   2029 		sc->sc_flags |= WM_F_PCIE;
   2030 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2031 		    && (sc->sc_type != WM_T_ICH10)
   2032 		    && (sc->sc_type != WM_T_PCH)
   2033 		    && (sc->sc_type != WM_T_PCH2)
   2034 		    && (sc->sc_type != WM_T_PCH_LPT)
   2035 		    && (sc->sc_type != WM_T_PCH_SPT)
   2036 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2037 			/* ICH* and PCH* have no PCIe capability registers */
   2038 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2039 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2040 				NULL) == 0)
   2041 				aprint_error_dev(sc->sc_dev,
   2042 				    "unable to find PCIe capability\n");
   2043 		}
   2044 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2045 	} else {
   2046 		reg = CSR_READ(sc, WMREG_STATUS);
   2047 		if (reg & STATUS_BUS64)
   2048 			sc->sc_flags |= WM_F_BUS64;
   2049 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2050 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2051 
   2052 			sc->sc_flags |= WM_F_PCIX;
   2053 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2054 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2055 				aprint_error_dev(sc->sc_dev,
   2056 				    "unable to find PCIX capability\n");
   2057 			else if (sc->sc_type != WM_T_82545_3 &&
   2058 				 sc->sc_type != WM_T_82546_3) {
   2059 				/*
   2060 				 * Work around a problem caused by the BIOS
   2061 				 * setting the max memory read byte count
   2062 				 * incorrectly.
   2063 				 */
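         				/*
         				 * Both fields encode 512 << n bytes,
         				 * so e.g. an MMRBC of 4096 (n = 3) on
         				 * a device that caps out at 2048
         				 * (n = 2) is clamped to 2048 below.
         				 */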
   2064 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2065 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2066 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2067 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2068 
   2069 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2070 				    PCIX_CMD_BYTECNT_SHIFT;
   2071 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2072 				    PCIX_STATUS_MAXB_SHIFT;
   2073 				if (bytecnt > maxb) {
   2074 					aprint_verbose_dev(sc->sc_dev,
   2075 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2076 					    512 << bytecnt, 512 << maxb);
   2077 					pcix_cmd = (pcix_cmd &
   2078 					    ~PCIX_CMD_BYTECNT_MASK) |
   2079 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2080 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2081 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2082 					    pcix_cmd);
   2083 				}
   2084 			}
   2085 		}
   2086 		/*
   2087 		 * The quad port adapter is special; it has a PCIX-PCIX
   2088 		 * bridge on the board, and can run the secondary bus at
   2089 		 * a higher speed.
   2090 		 */
   2091 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2092 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2093 								      : 66;
   2094 		} else if (sc->sc_flags & WM_F_PCIX) {
   2095 			switch (reg & STATUS_PCIXSPD_MASK) {
   2096 			case STATUS_PCIXSPD_50_66:
   2097 				sc->sc_bus_speed = 66;
   2098 				break;
   2099 			case STATUS_PCIXSPD_66_100:
   2100 				sc->sc_bus_speed = 100;
   2101 				break;
   2102 			case STATUS_PCIXSPD_100_133:
   2103 				sc->sc_bus_speed = 133;
   2104 				break;
   2105 			default:
   2106 				aprint_error_dev(sc->sc_dev,
   2107 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2108 				    reg & STATUS_PCIXSPD_MASK);
   2109 				sc->sc_bus_speed = 66;
   2110 				break;
   2111 			}
   2112 		} else
   2113 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2114 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2115 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2116 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2117 	}
   2118 
   2119 	/* clear interesting stat counters */
   2120 	CSR_READ(sc, WMREG_COLC);
   2121 	CSR_READ(sc, WMREG_RXERRC);
   2122 
   2123 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2124 	    || (sc->sc_type >= WM_T_ICH8))
   2125 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2126 	if (sc->sc_type >= WM_T_ICH8)
   2127 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2128 
   2129 	/* Set PHY, NVM mutex related stuff */
   2130 	switch (sc->sc_type) {
   2131 	case WM_T_82542_2_0:
   2132 	case WM_T_82542_2_1:
   2133 	case WM_T_82543:
   2134 	case WM_T_82544:
   2135 		/* Microwire */
   2136 		sc->nvm.read = wm_nvm_read_uwire;
   2137 		sc->sc_nvm_wordsize = 64;
   2138 		sc->sc_nvm_addrbits = 6;
   2139 		break;
   2140 	case WM_T_82540:
   2141 	case WM_T_82545:
   2142 	case WM_T_82545_3:
   2143 	case WM_T_82546:
   2144 	case WM_T_82546_3:
   2145 		/* Microwire */
   2146 		sc->nvm.read = wm_nvm_read_uwire;
   2147 		reg = CSR_READ(sc, WMREG_EECD);
   2148 		if (reg & EECD_EE_SIZE) {
   2149 			sc->sc_nvm_wordsize = 256;
   2150 			sc->sc_nvm_addrbits = 8;
   2151 		} else {
   2152 			sc->sc_nvm_wordsize = 64;
   2153 			sc->sc_nvm_addrbits = 6;
   2154 		}
   2155 		sc->sc_flags |= WM_F_LOCK_EECD;
   2156 		sc->nvm.acquire = wm_get_eecd;
   2157 		sc->nvm.release = wm_put_eecd;
   2158 		break;
   2159 	case WM_T_82541:
   2160 	case WM_T_82541_2:
   2161 	case WM_T_82547:
   2162 	case WM_T_82547_2:
   2163 		reg = CSR_READ(sc, WMREG_EECD);
   2164 		/*
    2165 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2166 		 * 8254[17], so set the flags and functions before calling it.
   2167 		 */
   2168 		sc->sc_flags |= WM_F_LOCK_EECD;
   2169 		sc->nvm.acquire = wm_get_eecd;
   2170 		sc->nvm.release = wm_put_eecd;
   2171 		if (reg & EECD_EE_TYPE) {
   2172 			/* SPI */
   2173 			sc->nvm.read = wm_nvm_read_spi;
   2174 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2175 			wm_nvm_set_addrbits_size_eecd(sc);
   2176 		} else {
   2177 			/* Microwire */
   2178 			sc->nvm.read = wm_nvm_read_uwire;
   2179 			if ((reg & EECD_EE_ABITS) != 0) {
   2180 				sc->sc_nvm_wordsize = 256;
   2181 				sc->sc_nvm_addrbits = 8;
   2182 			} else {
   2183 				sc->sc_nvm_wordsize = 64;
   2184 				sc->sc_nvm_addrbits = 6;
   2185 			}
   2186 		}
   2187 		break;
   2188 	case WM_T_82571:
   2189 	case WM_T_82572:
   2190 		/* SPI */
   2191 		sc->nvm.read = wm_nvm_read_eerd;
    2192 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2193 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2194 		wm_nvm_set_addrbits_size_eecd(sc);
   2195 		sc->phy.acquire = wm_get_swsm_semaphore;
   2196 		sc->phy.release = wm_put_swsm_semaphore;
   2197 		sc->nvm.acquire = wm_get_nvm_82571;
   2198 		sc->nvm.release = wm_put_nvm_82571;
   2199 		break;
   2200 	case WM_T_82573:
   2201 	case WM_T_82574:
   2202 	case WM_T_82583:
   2203 		sc->nvm.read = wm_nvm_read_eerd;
    2204 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2205 		if (sc->sc_type == WM_T_82573) {
   2206 			sc->phy.acquire = wm_get_swsm_semaphore;
   2207 			sc->phy.release = wm_put_swsm_semaphore;
   2208 			sc->nvm.acquire = wm_get_nvm_82571;
   2209 			sc->nvm.release = wm_put_nvm_82571;
   2210 		} else {
   2211 			/* Both PHY and NVM use the same semaphore. */
   2212 			sc->phy.acquire = sc->nvm.acquire
   2213 			    = wm_get_swfwhw_semaphore;
   2214 			sc->phy.release = sc->nvm.release
   2215 			    = wm_put_swfwhw_semaphore;
   2216 		}
   2217 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2218 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2219 			sc->sc_nvm_wordsize = 2048;
   2220 		} else {
   2221 			/* SPI */
   2222 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2223 			wm_nvm_set_addrbits_size_eecd(sc);
   2224 		}
   2225 		break;
   2226 	case WM_T_82575:
   2227 	case WM_T_82576:
   2228 	case WM_T_82580:
   2229 	case WM_T_I350:
   2230 	case WM_T_I354:
   2231 	case WM_T_80003:
   2232 		/* SPI */
   2233 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2234 		wm_nvm_set_addrbits_size_eecd(sc);
   2235 		if ((sc->sc_type == WM_T_80003)
   2236 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2237 			sc->nvm.read = wm_nvm_read_eerd;
   2238 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2239 		} else {
   2240 			sc->nvm.read = wm_nvm_read_spi;
   2241 			sc->sc_flags |= WM_F_LOCK_EECD;
   2242 		}
   2243 		sc->phy.acquire = wm_get_phy_82575;
   2244 		sc->phy.release = wm_put_phy_82575;
   2245 		sc->nvm.acquire = wm_get_nvm_80003;
   2246 		sc->nvm.release = wm_put_nvm_80003;
   2247 		break;
   2248 	case WM_T_ICH8:
   2249 	case WM_T_ICH9:
   2250 	case WM_T_ICH10:
   2251 	case WM_T_PCH:
   2252 	case WM_T_PCH2:
   2253 	case WM_T_PCH_LPT:
   2254 		sc->nvm.read = wm_nvm_read_ich8;
   2255 		/* FLASH */
   2256 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2257 		sc->sc_nvm_wordsize = 2048;
    2258 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
         		    WM_ICH8_FLASH);
   2259 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2260 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2261 			aprint_error_dev(sc->sc_dev,
   2262 			    "can't map FLASH registers\n");
   2263 			goto out;
   2264 		}
   2265 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2266 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2267 		    ICH_FLASH_SECTOR_SIZE;
   2268 		sc->sc_ich8_flash_bank_size =
   2269 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2270 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2271 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2272 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
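         		/*
         		 * That is: the low and high GFPREG fields hold the
         		 * first and last sectors of the NVM region, the sector
         		 * count times ICH_FLASH_SECTOR_SIZE is its size in
         		 * bytes, and that is halved once for the two flash
         		 * banks and once more to convert bytes to 16-bit words.
         		 */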
   2273 		sc->sc_flashreg_offset = 0;
   2274 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2275 		sc->phy.release = wm_put_swflag_ich8lan;
   2276 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2277 		sc->nvm.release = wm_put_nvm_ich8lan;
   2278 		break;
   2279 	case WM_T_PCH_SPT:
   2280 	case WM_T_PCH_CNP:
   2281 		sc->nvm.read = wm_nvm_read_spt;
   2282 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2283 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2284 		sc->sc_flasht = sc->sc_st;
   2285 		sc->sc_flashh = sc->sc_sh;
   2286 		sc->sc_ich8_flash_base = 0;
   2287 		sc->sc_nvm_wordsize =
   2288 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2289 		    * NVM_SIZE_MULTIPLIER;
    2290 		/* It is the size in bytes; we want words */
   2291 		sc->sc_nvm_wordsize /= 2;
   2292 		/* assume 2 banks */
   2293 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2294 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2295 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2296 		sc->phy.release = wm_put_swflag_ich8lan;
   2297 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2298 		sc->nvm.release = wm_put_nvm_ich8lan;
   2299 		break;
   2300 	case WM_T_I210:
   2301 	case WM_T_I211:
    2302 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2303 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2304 		if (wm_nvm_flash_presence_i210(sc)) {
   2305 			sc->nvm.read = wm_nvm_read_eerd;
   2306 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2307 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2308 			wm_nvm_set_addrbits_size_eecd(sc);
   2309 		} else {
   2310 			sc->nvm.read = wm_nvm_read_invm;
   2311 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2312 			sc->sc_nvm_wordsize = INVM_SIZE;
   2313 		}
   2314 		sc->phy.acquire = wm_get_phy_82575;
   2315 		sc->phy.release = wm_put_phy_82575;
   2316 		sc->nvm.acquire = wm_get_nvm_80003;
   2317 		sc->nvm.release = wm_put_nvm_80003;
   2318 		break;
   2319 	default:
   2320 		break;
   2321 	}
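         	/*
         	 * From this point on, NVM and PHY accesses are expected to go
         	 * through the sc->nvm.read/acquire/release and
         	 * sc->phy.acquire/release hooks selected above for the chip
         	 * family.
         	 */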
   2322 
   2323 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2324 	switch (sc->sc_type) {
   2325 	case WM_T_82571:
   2326 	case WM_T_82572:
   2327 		reg = CSR_READ(sc, WMREG_SWSM2);
   2328 		if ((reg & SWSM2_LOCK) == 0) {
   2329 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2330 			force_clear_smbi = true;
   2331 		} else
   2332 			force_clear_smbi = false;
   2333 		break;
   2334 	case WM_T_82573:
   2335 	case WM_T_82574:
   2336 	case WM_T_82583:
   2337 		force_clear_smbi = true;
   2338 		break;
   2339 	default:
   2340 		force_clear_smbi = false;
   2341 		break;
   2342 	}
   2343 	if (force_clear_smbi) {
   2344 		reg = CSR_READ(sc, WMREG_SWSM);
   2345 		if ((reg & SWSM_SMBI) != 0)
   2346 			aprint_error_dev(sc->sc_dev,
   2347 			    "Please update the Bootagent\n");
   2348 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2349 	}
   2350 
   2351 	/*
    2352 	 * Defer printing the EEPROM type until after verifying the checksum.
   2353 	 * This allows the EEPROM type to be printed correctly in the case
   2354 	 * that no EEPROM is attached.
   2355 	 */
   2356 	/*
   2357 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2358 	 * this for later, so we can fail future reads from the EEPROM.
   2359 	 */
   2360 	if (wm_nvm_validate_checksum(sc)) {
   2361 		/*
    2362 		 * Check again, because some PCI-e parts fail the first
    2363 		 * check due to the link being in a sleep state.
   2364 		 */
   2365 		if (wm_nvm_validate_checksum(sc))
   2366 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2367 	}
   2368 
   2369 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2370 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2371 	else {
   2372 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2373 		    sc->sc_nvm_wordsize);
   2374 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2375 			aprint_verbose("iNVM");
   2376 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2377 			aprint_verbose("FLASH(HW)");
   2378 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2379 			aprint_verbose("FLASH");
   2380 		else {
   2381 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2382 				eetype = "SPI";
   2383 			else
   2384 				eetype = "MicroWire";
   2385 			aprint_verbose("(%d address bits) %s EEPROM",
   2386 			    sc->sc_nvm_addrbits, eetype);
   2387 		}
   2388 	}
   2389 	wm_nvm_version(sc);
   2390 	aprint_verbose("\n");
   2391 
   2392 	/*
    2393 	 * XXX The first call to wm_gmii_setup_phytype. The result might be
   2394 	 * incorrect.
   2395 	 */
   2396 	wm_gmii_setup_phytype(sc, 0, 0);
   2397 
   2398 	/* Check for WM_F_WOL on some chips before wm_reset() */
   2399 	switch (sc->sc_type) {
   2400 	case WM_T_ICH8:
   2401 	case WM_T_ICH9:
   2402 	case WM_T_ICH10:
   2403 	case WM_T_PCH:
   2404 	case WM_T_PCH2:
   2405 	case WM_T_PCH_LPT:
   2406 	case WM_T_PCH_SPT:
   2407 	case WM_T_PCH_CNP:
   2408 		apme_mask = WUC_APME;
   2409 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2410 		if ((eeprom_data & apme_mask) != 0)
   2411 			sc->sc_flags |= WM_F_WOL;
   2412 		break;
   2413 	default:
   2414 		break;
   2415 	}
   2416 
   2417 	/* Reset the chip to a known state. */
   2418 	wm_reset(sc);
   2419 
   2420 	/*
   2421 	 * Check for I21[01] PLL workaround.
   2422 	 *
   2423 	 * Three cases:
   2424 	 * a) Chip is I211.
   2425 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2426 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2427 	 */
   2428 	if (sc->sc_type == WM_T_I211)
   2429 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2430 	if (sc->sc_type == WM_T_I210) {
   2431 		if (!wm_nvm_flash_presence_i210(sc))
   2432 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2433 		else if ((sc->sc_nvm_ver_major < 3)
   2434 		    || ((sc->sc_nvm_ver_major == 3)
   2435 			&& (sc->sc_nvm_ver_minor < 25))) {
   2436 			aprint_verbose_dev(sc->sc_dev,
   2437 			    "ROM image version %d.%d is older than 3.25\n",
   2438 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2439 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2440 		}
   2441 	}
   2442 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2443 		wm_pll_workaround_i210(sc);
   2444 
   2445 	wm_get_wakeup(sc);
   2446 
   2447 	/* Non-AMT based hardware can now take control from firmware */
   2448 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2449 		wm_get_hw_control(sc);
   2450 
   2451 	/*
    2452 	 * Read the Ethernet address from the EEPROM, unless it was
    2453 	 * already found in the device properties.
   2454 	 */
   2455 	ea = prop_dictionary_get(dict, "mac-address");
   2456 	if (ea != NULL) {
   2457 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2458 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2459 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2460 	} else {
   2461 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2462 			aprint_error_dev(sc->sc_dev,
   2463 			    "unable to read Ethernet address\n");
   2464 			goto out;
   2465 		}
   2466 	}
   2467 
   2468 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2469 	    ether_sprintf(enaddr));
   2470 
   2471 	/*
   2472 	 * Read the config info from the EEPROM, and set up various
   2473 	 * bits in the control registers based on their contents.
   2474 	 */
   2475 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2476 	if (pn != NULL) {
   2477 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2478 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2479 	} else {
   2480 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2481 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2482 			goto out;
   2483 		}
   2484 	}
   2485 
   2486 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2487 	if (pn != NULL) {
   2488 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2489 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2490 	} else {
   2491 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2492 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2493 			goto out;
   2494 		}
   2495 	}
   2496 
   2497 	/* check for WM_F_WOL */
   2498 	switch (sc->sc_type) {
   2499 	case WM_T_82542_2_0:
   2500 	case WM_T_82542_2_1:
   2501 	case WM_T_82543:
   2502 		/* dummy? */
   2503 		eeprom_data = 0;
   2504 		apme_mask = NVM_CFG3_APME;
   2505 		break;
   2506 	case WM_T_82544:
   2507 		apme_mask = NVM_CFG2_82544_APM_EN;
   2508 		eeprom_data = cfg2;
   2509 		break;
   2510 	case WM_T_82546:
   2511 	case WM_T_82546_3:
   2512 	case WM_T_82571:
   2513 	case WM_T_82572:
   2514 	case WM_T_82573:
   2515 	case WM_T_82574:
   2516 	case WM_T_82583:
   2517 	case WM_T_80003:
   2518 	case WM_T_82575:
   2519 	case WM_T_82576:
   2520 		apme_mask = NVM_CFG3_APME;
   2521 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2522 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2523 		break;
   2524 	case WM_T_82580:
   2525 	case WM_T_I350:
   2526 	case WM_T_I354:
   2527 	case WM_T_I210:
   2528 	case WM_T_I211:
   2529 		apme_mask = NVM_CFG3_APME;
   2530 		wm_nvm_read(sc,
   2531 		    NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + NVM_OFF_CFG3_PORTA,
   2532 		    1, &eeprom_data);
   2533 		break;
   2534 	case WM_T_ICH8:
   2535 	case WM_T_ICH9:
   2536 	case WM_T_ICH10:
   2537 	case WM_T_PCH:
   2538 	case WM_T_PCH2:
   2539 	case WM_T_PCH_LPT:
   2540 	case WM_T_PCH_SPT:
   2541 	case WM_T_PCH_CNP:
    2542 		/* Already checked before wm_reset() */
   2543 		apme_mask = eeprom_data = 0;
   2544 		break;
   2545 	default: /* XXX 82540 */
   2546 		apme_mask = NVM_CFG3_APME;
   2547 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2548 		break;
   2549 	}
    2550 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2551 	if ((eeprom_data & apme_mask) != 0)
   2552 		sc->sc_flags |= WM_F_WOL;
   2553 
   2554 	/*
    2555 	 * We have the EEPROM settings; now apply the special cases
    2556 	 * where the EEPROM may be wrong or the board doesn't support
    2557 	 * wake-on-LAN on a particular port.
   2558 	 */
   2559 	switch (sc->sc_pcidevid) {
   2560 	case PCI_PRODUCT_INTEL_82546GB_PCIE:
   2561 		sc->sc_flags &= ~WM_F_WOL;
   2562 		break;
   2563 	case PCI_PRODUCT_INTEL_82546EB_FIBER:
   2564 	case PCI_PRODUCT_INTEL_82546GB_FIBER:
   2565 		/* Wake events only supported on port A for dual fiber
   2566 		 * regardless of eeprom setting */
   2567 		if (sc->sc_funcid == 1)
   2568 			sc->sc_flags &= ~WM_F_WOL;
   2569 		break;
   2570 	case PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3:
   2571 		/* if quad port adapter, disable WoL on all but port A */
   2572 		if (sc->sc_funcid != 0)
   2573 			sc->sc_flags &= ~WM_F_WOL;
   2574 		break;
   2575 	case PCI_PRODUCT_INTEL_82571EB_FIBER:
   2576 		/* Wake events only supported on port A for dual fiber
   2577 		 * regardless of eeprom setting */
   2578 		if (sc->sc_funcid == 1)
   2579 			sc->sc_flags &= ~WM_F_WOL;
   2580 		break;
   2581 	case PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER:
   2582 	case PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER:
   2583 	case PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER:
   2584 		/* if quad port adapter, disable WoL on all but port A */
   2585 		if (sc->sc_funcid != 0)
   2586 			sc->sc_flags &= ~WM_F_WOL;
   2587 		break;
   2588 	}
   2589 
   2590 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2591 		/* Check NVM for autonegotiation */
   2592 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2593 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2594 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2595 		}
   2596 	}
   2597 
   2598 	/*
    2599 	 * XXX need special handling for some multiple-port cards
    2600 	 * to disable a particular port.
   2601 	 */
   2602 
   2603 	if (sc->sc_type >= WM_T_82544) {
   2604 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2605 		if (pn != NULL) {
   2606 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2607 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2608 		} else {
   2609 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2610 				aprint_error_dev(sc->sc_dev,
   2611 				    "unable to read SWDPIN\n");
   2612 				goto out;
   2613 			}
   2614 		}
   2615 	}
   2616 
   2617 	if (cfg1 & NVM_CFG1_ILOS)
   2618 		sc->sc_ctrl |= CTRL_ILOS;
   2619 
   2620 	/*
   2621 	 * XXX
    2622 	 * This code isn't correct because pins 2 and 3 are located in
    2623 	 * different positions on newer chips. Check all datasheets.
    2624 	 *
    2625 	 * Until this problem is resolved, handle only chips <= 82580.
   2626 	 */
   2627 	if (sc->sc_type <= WM_T_82580) {
   2628 		if (sc->sc_type >= WM_T_82544) {
   2629 			sc->sc_ctrl |=
   2630 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2631 			    CTRL_SWDPIO_SHIFT;
   2632 			sc->sc_ctrl |=
   2633 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2634 			    CTRL_SWDPINS_SHIFT;
   2635 		} else {
   2636 			sc->sc_ctrl |=
   2637 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2638 			    CTRL_SWDPIO_SHIFT;
   2639 		}
   2640 	}
   2641 
   2642 	/* XXX For other than 82580? */
   2643 	if (sc->sc_type == WM_T_82580) {
   2644 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2645 		if (nvmword & __BIT(13))
   2646 			sc->sc_ctrl |= CTRL_ILOS;
   2647 	}
   2648 
   2649 #if 0
   2650 	if (sc->sc_type >= WM_T_82544) {
   2651 		if (cfg1 & NVM_CFG1_IPS0)
   2652 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2653 		if (cfg1 & NVM_CFG1_IPS1)
   2654 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2655 		sc->sc_ctrl_ext |=
   2656 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2657 		    CTRL_EXT_SWDPIO_SHIFT;
   2658 		sc->sc_ctrl_ext |=
   2659 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2660 		    CTRL_EXT_SWDPINS_SHIFT;
   2661 	} else {
   2662 		sc->sc_ctrl_ext |=
   2663 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2664 		    CTRL_EXT_SWDPIO_SHIFT;
   2665 	}
   2666 #endif
   2667 
   2668 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2669 #if 0
   2670 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2671 #endif
   2672 
   2673 	if (sc->sc_type == WM_T_PCH) {
   2674 		uint16_t val;
   2675 
   2676 		/* Save the NVM K1 bit setting */
   2677 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2678 
   2679 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2680 			sc->sc_nvm_k1_enabled = 1;
   2681 		else
   2682 			sc->sc_nvm_k1_enabled = 0;
   2683 	}
   2684 
    2685 	/* Determine if we're in GMII, TBI, SERDES or SGMII mode */
   2686 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2687 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2688 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2689 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2690 	    || sc->sc_type == WM_T_82573
   2691 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2692 		/* Copper only */
   2693 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2694 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2695 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2696 	    || (sc->sc_type == WM_T_I211)) {
   2697 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2698 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2699 		switch (link_mode) {
   2700 		case CTRL_EXT_LINK_MODE_1000KX:
   2701 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2702 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2703 			break;
   2704 		case CTRL_EXT_LINK_MODE_SGMII:
   2705 			if (wm_sgmii_uses_mdio(sc)) {
   2706 				aprint_verbose_dev(sc->sc_dev,
   2707 				    "SGMII(MDIO)\n");
   2708 				sc->sc_flags |= WM_F_SGMII;
   2709 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2710 				break;
   2711 			}
   2712 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2713 			/*FALLTHROUGH*/
   2714 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2715 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2716 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2717 				if (link_mode
   2718 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2719 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2720 					sc->sc_flags |= WM_F_SGMII;
   2721 				} else {
   2722 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2723 					aprint_verbose_dev(sc->sc_dev,
   2724 					    "SERDES\n");
   2725 				}
   2726 				break;
   2727 			}
   2728 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2729 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2730 
   2731 			/* Change current link mode setting */
   2732 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2733 			switch (sc->sc_mediatype) {
   2734 			case WM_MEDIATYPE_COPPER:
   2735 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2736 				break;
   2737 			case WM_MEDIATYPE_SERDES:
   2738 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2739 				break;
   2740 			default:
   2741 				break;
   2742 			}
   2743 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2744 			break;
   2745 		case CTRL_EXT_LINK_MODE_GMII:
   2746 		default:
   2747 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2748 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2749 			break;
   2750 		}
   2751 
    2752 		reg &= ~CTRL_EXT_I2C_ENA;
    2753 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2754 			reg |= CTRL_EXT_I2C_ENA;
   2757 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2758 	} else if (sc->sc_type < WM_T_82543 ||
   2759 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2760 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2761 			aprint_error_dev(sc->sc_dev,
   2762 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2763 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2764 		}
   2765 	} else {
   2766 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2767 			aprint_error_dev(sc->sc_dev,
   2768 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2769 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2770 		}
   2771 	}
   2772 
   2773 	if (sc->sc_type >= WM_T_PCH2)
   2774 		sc->sc_flags |= WM_F_EEE;
   2775 	else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211)
   2776 	    && (sc->sc_mediatype == WM_MEDIATYPE_COPPER)) {
   2777 		/* XXX: Need special handling for I354. (not yet) */
   2778 		if (sc->sc_type != WM_T_I354)
   2779 			sc->sc_flags |= WM_F_EEE;
   2780 	}
   2781 
   2782 	/* Set device properties (macflags) */
   2783 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2784 
   2785 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2786 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2787 
   2788 	/* Initialize the media structures accordingly. */
   2789 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2790 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2791 	else
   2792 		wm_tbi_mediainit(sc); /* All others */
   2793 
   2794 	ifp = &sc->sc_ethercom.ec_if;
   2795 	xname = device_xname(sc->sc_dev);
   2796 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2797 	ifp->if_softc = sc;
   2798 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2799 #ifdef WM_MPSAFE
   2800 	ifp->if_extflags = IFEF_MPSAFE;
   2801 #endif
   2802 	ifp->if_ioctl = wm_ioctl;
   2803 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2804 		ifp->if_start = wm_nq_start;
   2805 		/*
   2806 		 * When the number of CPUs is one and the controller can use
    2807 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2808 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2809 		 * the other for link status changes.
   2810 		 * In this situation, wm_nq_transmit() is disadvantageous
   2811 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2812 		 */
   2813 		if (wm_is_using_multiqueue(sc))
   2814 			ifp->if_transmit = wm_nq_transmit;
   2815 	} else {
   2816 		ifp->if_start = wm_start;
   2817 		/*
    2818 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2819 		 */
   2820 		if (wm_is_using_multiqueue(sc))
   2821 			ifp->if_transmit = wm_transmit;
   2822 	}
    2823 	/* wm(4) does not use ifp->if_watchdog; wm_tick() serves as the watchdog. */
   2824 	ifp->if_init = wm_init;
   2825 	ifp->if_stop = wm_stop;
   2826 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2827 	IFQ_SET_READY(&ifp->if_snd);
   2828 
   2829 	/* Check for jumbo frame */
   2830 	switch (sc->sc_type) {
   2831 	case WM_T_82573:
   2832 		/* XXX limited to 9234 if ASPM is disabled */
   2833 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2834 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2835 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2836 		break;
   2837 	case WM_T_82571:
   2838 	case WM_T_82572:
   2839 	case WM_T_82574:
   2840 	case WM_T_82583:
   2841 	case WM_T_82575:
   2842 	case WM_T_82576:
   2843 	case WM_T_82580:
   2844 	case WM_T_I350:
   2845 	case WM_T_I354:
   2846 	case WM_T_I210:
   2847 	case WM_T_I211:
   2848 	case WM_T_80003:
   2849 	case WM_T_ICH9:
   2850 	case WM_T_ICH10:
   2851 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2852 	case WM_T_PCH_LPT:
   2853 	case WM_T_PCH_SPT:
   2854 	case WM_T_PCH_CNP:
   2855 		/* XXX limited to 9234 */
   2856 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2857 		break;
   2858 	case WM_T_PCH:
   2859 		/* XXX limited to 4096 */
   2860 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2861 		break;
   2862 	case WM_T_82542_2_0:
   2863 	case WM_T_82542_2_1:
   2864 	case WM_T_ICH8:
   2865 		/* No support for jumbo frame */
   2866 		break;
   2867 	default:
   2868 		/* ETHER_MAX_LEN_JUMBO */
   2869 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2870 		break;
   2871 	}
   2872 
   2873 	/* If we're a i82543 or greater, we can support VLANs. */
   2874 	if (sc->sc_type >= WM_T_82543)
   2875 		sc->sc_ethercom.ec_capabilities |=
   2876 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2877 
   2878 	if ((sc->sc_flags & WM_F_EEE) != 0)
   2879 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_EEE;
   2880 
   2881 	/*
    2882 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2883 	 * on i82543 and later.
   2884 	 */
   2885 	if (sc->sc_type >= WM_T_82543) {
   2886 		ifp->if_capabilities |=
   2887 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2888 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2889 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2890 		    IFCAP_CSUM_TCPv6_Tx |
   2891 		    IFCAP_CSUM_UDPv6_Tx;
   2892 	}
   2893 
   2894 	/*
   2895 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2896 	 *
   2897 	 *	82541GI (8086:1076) ... no
   2898 	 *	82572EI (8086:10b9) ... yes
   2899 	 */
   2900 	if (sc->sc_type >= WM_T_82571) {
   2901 		ifp->if_capabilities |=
   2902 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2903 	}
   2904 
   2905 	/*
   2906 	 * If we're a i82544 or greater (except i82547), we can do
   2907 	 * TCP segmentation offload.
   2908 	 */
   2909 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2910 		ifp->if_capabilities |= IFCAP_TSOv4;
   2911 	}
   2912 
   2913 	if (sc->sc_type >= WM_T_82571) {
   2914 		ifp->if_capabilities |= IFCAP_TSOv6;
   2915 	}
   2916 
   2917 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2918 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2919 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2920 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2921 
   2922 #ifdef WM_MPSAFE
   2923 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2924 #else
   2925 	sc->sc_core_lock = NULL;
   2926 #endif
   2927 
   2928 	/* Attach the interface. */
   2929 	error = if_initialize(ifp);
   2930 	if (error != 0) {
   2931 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2932 		    error);
   2933 		return; /* Error */
   2934 	}
   2935 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2936 	ether_ifattach(ifp, enaddr);
   2937 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2938 	if_register(ifp);
   2939 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2940 	    RND_FLAG_DEFAULT);
   2941 
   2942 #ifdef WM_EVENT_COUNTERS
   2943 	/* Attach event counters. */
   2944 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2945 	    NULL, xname, "linkintr");
   2946 
   2947 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2948 	    NULL, xname, "tx_xoff");
   2949 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2950 	    NULL, xname, "tx_xon");
   2951 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2952 	    NULL, xname, "rx_xoff");
   2953 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2954 	    NULL, xname, "rx_xon");
   2955 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2956 	    NULL, xname, "rx_macctl");
   2957 #endif /* WM_EVENT_COUNTERS */
   2958 
   2959 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2960 		pmf_class_network_register(self, ifp);
   2961 	else
   2962 		aprint_error_dev(self, "couldn't establish power handler\n");
   2963 
   2964 	sc->sc_flags |= WM_F_ATTACHED;
   2965 out:
   2966 	return;
   2967 }
   2968 
   2969 /* The detach function (ca_detach) */
   2970 static int
   2971 wm_detach(device_t self, int flags __unused)
   2972 {
   2973 	struct wm_softc *sc = device_private(self);
   2974 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2975 	int i;
   2976 
   2977 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2978 		return 0;
   2979 
   2980 	/* Stop the interface. Callouts are stopped in it. */
   2981 	wm_stop(ifp, 1);
   2982 
   2983 	pmf_device_deregister(self);
   2984 
   2985 #ifdef WM_EVENT_COUNTERS
   2986 	evcnt_detach(&sc->sc_ev_linkintr);
   2987 
   2988 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2989 	evcnt_detach(&sc->sc_ev_tx_xon);
   2990 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2991 	evcnt_detach(&sc->sc_ev_rx_xon);
   2992 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2993 #endif /* WM_EVENT_COUNTERS */
   2994 
   2995 	/* Tell the firmware about the release */
   2996 	WM_CORE_LOCK(sc);
   2997 	wm_release_manageability(sc);
   2998 	wm_release_hw_control(sc);
   2999 	wm_enable_wakeup(sc);
   3000 	WM_CORE_UNLOCK(sc);
   3001 
   3002 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   3003 
   3004 	/* Delete all remaining media. */
   3005 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   3006 
   3007 	ether_ifdetach(ifp);
   3008 	if_detach(ifp);
   3009 	if_percpuq_destroy(sc->sc_ipq);
   3010 
   3011 	/* Unload RX dmamaps and free mbufs */
   3012 	for (i = 0; i < sc->sc_nqueues; i++) {
   3013 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   3014 		mutex_enter(rxq->rxq_lock);
   3015 		wm_rxdrain(rxq);
   3016 		mutex_exit(rxq->rxq_lock);
   3017 	}
   3018 	/* Must unlock here */
   3019 
   3020 	/* Disestablish the interrupt handler */
   3021 	for (i = 0; i < sc->sc_nintrs; i++) {
   3022 		if (sc->sc_ihs[i] != NULL) {
   3023 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   3024 			sc->sc_ihs[i] = NULL;
   3025 		}
   3026 	}
   3027 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   3028 
   3029 	wm_free_txrx_queues(sc);
   3030 
   3031 	/* Unmap the registers */
   3032 	if (sc->sc_ss) {
   3033 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   3034 		sc->sc_ss = 0;
   3035 	}
   3036 	if (sc->sc_ios) {
   3037 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   3038 		sc->sc_ios = 0;
   3039 	}
   3040 	if (sc->sc_flashs) {
   3041 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   3042 		sc->sc_flashs = 0;
   3043 	}
   3044 
   3045 	if (sc->sc_core_lock)
   3046 		mutex_obj_free(sc->sc_core_lock);
   3047 	if (sc->sc_ich_phymtx)
   3048 		mutex_obj_free(sc->sc_ich_phymtx);
   3049 	if (sc->sc_ich_nvmmtx)
   3050 		mutex_obj_free(sc->sc_ich_nvmmtx);
   3051 
   3052 	return 0;
   3053 }
   3054 
   3055 static bool
   3056 wm_suspend(device_t self, const pmf_qual_t *qual)
   3057 {
   3058 	struct wm_softc *sc = device_private(self);
   3059 
   3060 	wm_release_manageability(sc);
   3061 	wm_release_hw_control(sc);
   3062 	wm_enable_wakeup(sc);
   3063 
   3064 	return true;
   3065 }
   3066 
   3067 static bool
   3068 wm_resume(device_t self, const pmf_qual_t *qual)
   3069 {
   3070 	struct wm_softc *sc = device_private(self);
   3071 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3072 	pcireg_t reg;
   3073 	char buf[256];
   3074 
   3075 	reg = CSR_READ(sc, WMREG_WUS);
   3076 	if (reg != 0) {
   3077 		snprintb(buf, sizeof(buf), WUS_FLAGS, reg);
   3078 		device_printf(sc->sc_dev, "wakeup status %s\n", buf);
   3079 		CSR_WRITE(sc, WMREG_WUS, 0xffffffff); /* W1C */
   3080 	}
   3081 
   3082 	if (sc->sc_type >= WM_T_PCH2)
   3083 		wm_resume_workarounds_pchlan(sc);
   3084 	if ((ifp->if_flags & IFF_UP) == 0) {
   3085 		wm_reset(sc);
   3086 		/* Non-AMT based hardware can now take control from firmware */
   3087 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   3088 			wm_get_hw_control(sc);
   3089 		wm_init_manageability(sc);
   3090 	} else {
   3091 		/*
   3092 		 * We called pmf_class_network_register(), so if_init() is
   3093 		 * automatically called when IFF_UP. wm_reset(),
   3094 		 * wm_get_hw_control() and wm_init_manageability() are called
   3095 		 * via wm_init().
   3096 		 */
   3097 	}
   3098 
   3099 	return true;
   3100 }
   3101 
   3102 /*
   3103  * wm_watchdog:		[ifnet interface function]
   3104  *
   3105  *	Watchdog timer handler.
   3106  */
   3107 static void
   3108 wm_watchdog(struct ifnet *ifp)
   3109 {
   3110 	int qid;
   3111 	struct wm_softc *sc = ifp->if_softc;
    3112 	uint16_t hang_queue = 0; /* One bit per queue; wm(4)'s max is the 82576's 16. */
   3113 
   3114 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3115 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3116 
   3117 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3118 	}
   3119 
   3120 	/*
    3121 	 * If any queue hung up, reset the interface.
   3122 	 */
   3123 	if (hang_queue != 0) {
   3124 		(void) wm_init(ifp);
   3125 
   3126 		/*
    3127 		 * Some upper-layer code may still call ifp->if_start()
    3128 		 * directly, e.g. ALTQ or a single-CPU system.
   3129 		 */
   3130 		/* Try to get more packets going. */
   3131 		ifp->if_start(ifp);
   3132 	}
   3133 }
   3134 
   3135 
   3136 static void
   3137 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3138 {
   3139 
   3140 	mutex_enter(txq->txq_lock);
   3141 	if (txq->txq_sending &&
   3142 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3143 		wm_watchdog_txq_locked(ifp, txq, hang);
   3144 	}
   3145 	mutex_exit(txq->txq_lock);
   3146 }
   3147 
   3148 static void
   3149 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3150     uint16_t *hang)
   3151 {
   3152 	struct wm_softc *sc = ifp->if_softc;
   3153 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3154 
   3155 	KASSERT(mutex_owned(txq->txq_lock));
   3156 
   3157 	/*
   3158 	 * Since we're using delayed interrupts, sweep up
   3159 	 * before we report an error.
   3160 	 */
   3161 	wm_txeof(txq, UINT_MAX);
   3162 
   3163 	if (txq->txq_sending)
   3164 		*hang |= __BIT(wmq->wmq_id);
   3165 
   3166 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3167 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3168 		    device_xname(sc->sc_dev));
   3169 	} else {
   3170 #ifdef WM_DEBUG
   3171 		int i, j;
   3172 		struct wm_txsoft *txs;
   3173 #endif
   3174 		log(LOG_ERR,
   3175 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3176 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3177 		    txq->txq_next);
   3178 		ifp->if_oerrors++;
   3179 #ifdef WM_DEBUG
   3180 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3181 		    i = WM_NEXTTXS(txq, i)) {
   3182 		    txs = &txq->txq_soft[i];
   3183 		    printf("txs %d tx %d -> %d\n",
   3184 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3185 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3186 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3187 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3188 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3189 				    printf("\t %#08x%08x\n",
   3190 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3191 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3192 			    } else {
   3193 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3194 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3195 					txq->txq_descs[j].wtx_addr.wa_low);
   3196 				    printf("\t %#04x%02x%02x%08x\n",
   3197 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3198 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3199 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3200 					txq->txq_descs[j].wtx_cmdlen);
   3201 			    }
   3202 			if (j == txs->txs_lastdesc)
   3203 				break;
   3204 			}
   3205 		}
   3206 #endif
   3207 	}
   3208 }
   3209 
   3210 /*
   3211  * wm_tick:
   3212  *
   3213  *	One second timer, used to check link status, sweep up
   3214  *	completed transmit jobs, etc.
   3215  */
   3216 static void
   3217 wm_tick(void *arg)
   3218 {
   3219 	struct wm_softc *sc = arg;
   3220 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3221 #ifndef WM_MPSAFE
   3222 	int s = splnet();
   3223 #endif
   3224 
   3225 	WM_CORE_LOCK(sc);
   3226 
   3227 	if (sc->sc_core_stopping) {
   3228 		WM_CORE_UNLOCK(sc);
   3229 #ifndef WM_MPSAFE
   3230 		splx(s);
   3231 #endif
   3232 		return;
   3233 	}
   3234 
   3235 	if (sc->sc_type >= WM_T_82542_2_1) {
   3236 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3237 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3238 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3239 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3240 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3241 	}
   3242 
   3243 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
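         	/*
         	 * Accumulate input errors from the MAC statistics registers:
         	 * CRC, alignment, symbol, Rx, sequence, carrier-extension and
         	 * receive-length error counts (names per the Intel datasheets).
         	 */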
   3244 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3245 	    + CSR_READ(sc, WMREG_CRCERRS)
   3246 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3247 	    + CSR_READ(sc, WMREG_SYMERRC)
   3248 	    + CSR_READ(sc, WMREG_RXERRC)
   3249 	    + CSR_READ(sc, WMREG_SEC)
   3250 	    + CSR_READ(sc, WMREG_CEXTERR)
   3251 	    + CSR_READ(sc, WMREG_RLEC);
   3252 	/*
    3253 	 * WMREG_RNBC is incremented when no receive buffers are available
    3254 	 * in host memory. It is not the number of dropped packets: the
    3255 	 * Ethernet controller can still receive packets in that case as
    3256 	 * long as there is space in the PHY's FIFO.
    3257 	 *
    3258 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT instead
    3259 	 * of if_iqdrops.
   3260 	 */
   3261 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3262 
   3263 	if (sc->sc_flags & WM_F_HAS_MII)
   3264 		mii_tick(&sc->sc_mii);
   3265 	else if ((sc->sc_type >= WM_T_82575)
   3266 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3267 		wm_serdes_tick(sc);
   3268 	else
   3269 		wm_tbi_tick(sc);
   3270 
   3271 	WM_CORE_UNLOCK(sc);
   3272 
   3273 	wm_watchdog(ifp);
   3274 
   3275 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3276 }
   3277 
   3278 static int
   3279 wm_ifflags_cb(struct ethercom *ec)
   3280 {
   3281 	struct ifnet *ifp = &ec->ec_if;
   3282 	struct wm_softc *sc = ifp->if_softc;
   3283 	int iffchange, ecchange;
   3284 	bool needreset = false;
   3285 	int rc = 0;
   3286 
   3287 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3288 		device_xname(sc->sc_dev), __func__));
   3289 
   3290 	WM_CORE_LOCK(sc);
   3291 
   3292 	/*
    3293 	 * Check for if_flags changes.
    3294 	 * The main purpose is to prevent link-down when opening bpf.
   3295 	 */
   3296 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
   3297 	sc->sc_if_flags = ifp->if_flags;
   3298 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3299 		needreset = true;
   3300 		goto ec;
   3301 	}
   3302 
   3303 	/* iff related updates */
   3304 	if ((iffchange & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3305 		wm_set_filter(sc);
   3306 
   3307 	wm_set_vlan(sc);
   3308 
   3309 ec:
   3310 	/* Check for ec_capenable. */
   3311 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
   3312 	sc->sc_ec_capenable = ec->ec_capenable;
   3313 	if ((ecchange & ~ETHERCAP_EEE) != 0) {
   3314 		needreset = true;
   3315 		goto out;
   3316 	}
   3317 
   3318 	/* ec related updates */
   3319 	wm_set_eee(sc);
   3320 
   3321 out:
   3322 	if (needreset)
   3323 		rc = ENETRESET;
   3324 	WM_CORE_UNLOCK(sc);
   3325 
   3326 	return rc;
   3327 }
   3328 
   3329 /*
   3330  * wm_ioctl:		[ifnet interface function]
   3331  *
   3332  *	Handle control requests from the operator.
   3333  */
   3334 static int
   3335 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3336 {
   3337 	struct wm_softc *sc = ifp->if_softc;
   3338 	struct ifreq *ifr = (struct ifreq *) data;
   3339 	struct ifaddr *ifa = (struct ifaddr *)data;
   3340 	struct sockaddr_dl *sdl;
   3341 	int s, error;
   3342 
   3343 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3344 		device_xname(sc->sc_dev), __func__));
   3345 
   3346 #ifndef WM_MPSAFE
   3347 	s = splnet();
   3348 #endif
   3349 	switch (cmd) {
   3350 	case SIOCSIFMEDIA:
   3351 	case SIOCGIFMEDIA:
   3352 		WM_CORE_LOCK(sc);
   3353 		/* Flow control requires full-duplex mode. */
   3354 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3355 		    (ifr->ifr_media & IFM_FDX) == 0)
   3356 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3357 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3358 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3359 				/* We can do both TXPAUSE and RXPAUSE. */
   3360 				ifr->ifr_media |=
   3361 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3362 			}
   3363 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3364 		}
   3365 		WM_CORE_UNLOCK(sc);
   3366 #ifdef WM_MPSAFE
   3367 		s = splnet();
   3368 #endif
   3369 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3370 #ifdef WM_MPSAFE
   3371 		splx(s);
   3372 #endif
   3373 		break;
   3374 	case SIOCINITIFADDR:
   3375 		WM_CORE_LOCK(sc);
   3376 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3377 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3378 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3379 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3380 			/* unicast address is first multicast entry */
   3381 			wm_set_filter(sc);
   3382 			error = 0;
   3383 			WM_CORE_UNLOCK(sc);
   3384 			break;
   3385 		}
   3386 		WM_CORE_UNLOCK(sc);
   3387 		/*FALLTHROUGH*/
   3388 	default:
   3389 #ifdef WM_MPSAFE
   3390 		s = splnet();
   3391 #endif
   3392 		/* It may call wm_start, so unlock here */
   3393 		error = ether_ioctl(ifp, cmd, data);
   3394 #ifdef WM_MPSAFE
   3395 		splx(s);
   3396 #endif
   3397 		if (error != ENETRESET)
   3398 			break;
   3399 
   3400 		error = 0;
   3401 
   3402 		if (cmd == SIOCSIFCAP)
   3403 			error = (*ifp->if_init)(ifp);
   3404 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3405 			;
   3406 		else if (ifp->if_flags & IFF_RUNNING) {
   3407 			/*
   3408 			 * Multicast list has changed; set the hardware filter
   3409 			 * accordingly.
   3410 			 */
   3411 			WM_CORE_LOCK(sc);
   3412 			wm_set_filter(sc);
   3413 			WM_CORE_UNLOCK(sc);
   3414 		}
   3415 		break;
   3416 	}
   3417 
   3418 #ifndef WM_MPSAFE
   3419 	splx(s);
   3420 #endif
   3421 	return error;
   3422 }
   3423 
   3424 /* MAC address related */
   3425 
   3426 /*
    3427  * Get the NVM offset of the MAC address and return it.
    3428  * If an error occurs, return offset 0.
   3429  */
   3430 static uint16_t
   3431 wm_check_alt_mac_addr(struct wm_softc *sc)
   3432 {
   3433 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3434 	uint16_t offset = NVM_OFF_MACADDR;
   3435 
   3436 	/* Try to read alternative MAC address pointer */
   3437 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3438 		return 0;
   3439 
    3440 	/* Check whether the pointer is valid. */
   3441 	if ((offset == 0x0000) || (offset == 0xffff))
   3442 		return 0;
   3443 
   3444 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3445 	/*
    3446 	 * Check whether the alternative MAC address is valid.
    3447 	 * Some cards have a non-0xffff pointer but don't actually use
    3448 	 * an alternative MAC address.
    3449 	 *
    3450 	 * A valid station address must not have the broadcast bit set.
   3451 	 */
   3452 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3453 		if (((myea[0] & 0xff) & 0x01) == 0)
   3454 			return offset; /* Found */
   3455 
   3456 	/* Not found */
   3457 	return 0;
   3458 }
   3459 
   3460 static int
   3461 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3462 {
   3463 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3464 	uint16_t offset = NVM_OFF_MACADDR;
   3465 	int do_invert = 0;
   3466 
   3467 	switch (sc->sc_type) {
   3468 	case WM_T_82580:
   3469 	case WM_T_I350:
   3470 	case WM_T_I354:
   3471 		/* EEPROM Top Level Partitioning */
   3472 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3473 		break;
   3474 	case WM_T_82571:
   3475 	case WM_T_82575:
   3476 	case WM_T_82576:
   3477 	case WM_T_80003:
   3478 	case WM_T_I210:
   3479 	case WM_T_I211:
   3480 		offset = wm_check_alt_mac_addr(sc);
   3481 		if (offset == 0)
   3482 			if ((sc->sc_funcid & 0x01) == 1)
   3483 				do_invert = 1;
   3484 		break;
   3485 	default:
   3486 		if ((sc->sc_funcid & 0x01) == 1)
   3487 			do_invert = 1;
   3488 		break;
   3489 	}
   3490 
   3491 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3492 		goto bad;
   3493 
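         	/* Each 16-bit NVM word stores two MAC octets, low byte first. */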
   3494 	enaddr[0] = myea[0] & 0xff;
   3495 	enaddr[1] = myea[0] >> 8;
   3496 	enaddr[2] = myea[1] & 0xff;
   3497 	enaddr[3] = myea[1] >> 8;
   3498 	enaddr[4] = myea[2] & 0xff;
   3499 	enaddr[5] = myea[2] >> 8;
   3500 
   3501 	/*
   3502 	 * Toggle the LSB of the MAC address on the second port
   3503 	 * of some dual port cards.
   3504 	 */
   3505 	if (do_invert != 0)
   3506 		enaddr[5] ^= 1;
   3507 
   3508 	return 0;
   3509 
   3510  bad:
   3511 	return -1;
   3512 }
   3513 
   3514 /*
   3515  * wm_set_ral:
   3516  *
    3517  *	Set an entry in the receive address list.
   3518  */
   3519 static void
   3520 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3521 {
   3522 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3523 	uint32_t wlock_mac;
   3524 	int rv;
   3525 
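         	/*
         	 * RAL holds the low 32 bits of the station address; RAH holds
         	 * the remaining 16 bits plus the Address Valid flag.  A NULL
         	 * enaddr clears the entry.
         	 */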
   3526 	if (enaddr != NULL) {
   3527 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3528 		    (enaddr[3] << 24);
   3529 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3530 		ral_hi |= RAL_AV;
   3531 	} else {
   3532 		ral_lo = 0;
   3533 		ral_hi = 0;
   3534 	}
   3535 
   3536 	switch (sc->sc_type) {
   3537 	case WM_T_82542_2_0:
   3538 	case WM_T_82542_2_1:
   3539 	case WM_T_82543:
   3540 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3541 		CSR_WRITE_FLUSH(sc);
   3542 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3543 		CSR_WRITE_FLUSH(sc);
   3544 		break;
   3545 	case WM_T_PCH2:
   3546 	case WM_T_PCH_LPT:
   3547 	case WM_T_PCH_SPT:
   3548 	case WM_T_PCH_CNP:
   3549 		if (idx == 0) {
   3550 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3551 			CSR_WRITE_FLUSH(sc);
   3552 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3553 			CSR_WRITE_FLUSH(sc);
   3554 			return;
   3555 		}
   3556 		if (sc->sc_type != WM_T_PCH2) {
   3557 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3558 			    FWSM_WLOCK_MAC);
   3559 			addrl = WMREG_SHRAL(idx - 1);
   3560 			addrh = WMREG_SHRAH(idx - 1);
   3561 		} else {
   3562 			wlock_mac = 0;
   3563 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3564 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3565 		}
   3566 
   3567 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3568 			rv = wm_get_swflag_ich8lan(sc);
   3569 			if (rv != 0)
   3570 				return;
   3571 			CSR_WRITE(sc, addrl, ral_lo);
   3572 			CSR_WRITE_FLUSH(sc);
   3573 			CSR_WRITE(sc, addrh, ral_hi);
   3574 			CSR_WRITE_FLUSH(sc);
   3575 			wm_put_swflag_ich8lan(sc);
   3576 		}
   3577 
   3578 		break;
   3579 	default:
   3580 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3581 		CSR_WRITE_FLUSH(sc);
   3582 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3583 		CSR_WRITE_FLUSH(sc);
   3584 		break;
   3585 	}
   3586 }
   3587 
   3588 /*
   3589  * wm_mchash:
   3590  *
   3591  *	Compute the hash of the multicast address for the 4096-bit
   3592  *	multicast filter.
   3593  */
   3594 static uint32_t
   3595 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3596 {
   3597 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3598 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3599 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3600 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3601 	uint32_t hash;
   3602 
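         	/*
         	 * ICH/PCH chips use a 10-bit hash (1024-bit filter); the
         	 * others use a 12-bit hash (4096-bit filter).  Which bits of
         	 * the last two address octets feed the hash is selected by
         	 * sc_mchash_type.
         	 */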
   3603 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3604 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3605 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
    3606 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)) {
   3607 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3608 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3609 		return (hash & 0x3ff);
   3610 	}
   3611 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3612 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3613 
   3614 	return (hash & 0xfff);
   3615 }
   3616 
   3617 /*
    3618  * wm_rar_count:
    3619  *	Return the number of entries in the receive address list.
   3620  */
   3621 static int
   3622 wm_rar_count(struct wm_softc *sc)
   3623 {
   3624 	int size;
   3625 
   3626 	switch (sc->sc_type) {
   3627 	case WM_T_ICH8:
    3628 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3629 		break;
   3630 	case WM_T_ICH9:
   3631 	case WM_T_ICH10:
   3632 	case WM_T_PCH:
   3633 		size = WM_RAL_TABSIZE_ICH8;
   3634 		break;
   3635 	case WM_T_PCH2:
   3636 		size = WM_RAL_TABSIZE_PCH2;
   3637 		break;
   3638 	case WM_T_PCH_LPT:
   3639 	case WM_T_PCH_SPT:
   3640 	case WM_T_PCH_CNP:
   3641 		size = WM_RAL_TABSIZE_PCH_LPT;
   3642 		break;
   3643 	case WM_T_82575:
   3644 		size = WM_RAL_TABSIZE_82575;
   3645 		break;
   3646 	case WM_T_82576:
   3647 	case WM_T_82580:
   3648 		size = WM_RAL_TABSIZE_82576;
   3649 		break;
   3650 	case WM_T_I350:
   3651 	case WM_T_I354:
   3652 		size = WM_RAL_TABSIZE_I350;
   3653 		break;
   3654 	default:
   3655 		size = WM_RAL_TABSIZE;
   3656 	}
   3657 
   3658 	return size;
   3659 }
   3660 
   3661 /*
   3662  * wm_set_filter:
   3663  *
   3664  *	Set up the receive filter.
   3665  */
   3666 static void
   3667 wm_set_filter(struct wm_softc *sc)
   3668 {
   3669 	struct ethercom *ec = &sc->sc_ethercom;
   3670 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3671 	struct ether_multi *enm;
   3672 	struct ether_multistep step;
   3673 	bus_addr_t mta_reg;
   3674 	uint32_t hash, reg, bit;
   3675 	int i, size, ralmax;
   3676 
   3677 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3678 		device_xname(sc->sc_dev), __func__));
   3679 
   3680 	if (sc->sc_type >= WM_T_82544)
   3681 		mta_reg = WMREG_CORDOVA_MTA;
   3682 	else
   3683 		mta_reg = WMREG_MTA;
   3684 
   3685 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3686 
   3687 	if (ifp->if_flags & IFF_BROADCAST)
   3688 		sc->sc_rctl |= RCTL_BAM;
   3689 	if (ifp->if_flags & IFF_PROMISC) {
   3690 		sc->sc_rctl |= RCTL_UPE;
   3691 		goto allmulti;
   3692 	}
   3693 
   3694 	/*
   3695 	 * Set the station address in the first RAL slot, and
   3696 	 * clear the remaining slots.
   3697 	 */
   3698 	size = wm_rar_count(sc);
   3699 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3700 
   3701 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3702 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3703 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3704 		switch (i) {
   3705 		case 0:
   3706 			/* We can use all entries */
   3707 			ralmax = size;
   3708 			break;
   3709 		case 1:
   3710 			/* Only RAR[0] */
   3711 			ralmax = 1;
   3712 			break;
   3713 		default:
   3714 			/* available SHRA + RAR[0] */
   3715 			ralmax = i + 1;
   3716 		}
   3717 	} else
   3718 		ralmax = size;
   3719 	for (i = 1; i < size; i++) {
   3720 		if (i < ralmax)
   3721 			wm_set_ral(sc, NULL, i);
   3722 	}
   3723 
   3724 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3725 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3726 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3727 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3728 		size = WM_ICH8_MC_TABSIZE;
   3729 	else
   3730 		size = WM_MC_TABSIZE;
   3731 	/* Clear out the multicast table. */
   3732 	for (i = 0; i < size; i++) {
   3733 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3734 		CSR_WRITE_FLUSH(sc);
   3735 	}
   3736 
   3737 	ETHER_LOCK(ec);
   3738 	ETHER_FIRST_MULTI(step, ec, enm);
   3739 	while (enm != NULL) {
   3740 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3741 			ETHER_UNLOCK(ec);
   3742 			/*
   3743 			 * We must listen to a range of multicast addresses.
   3744 			 * For now, just accept all multicasts, rather than
   3745 			 * trying to set only those filter bits needed to match
   3746 			 * the range.  (At this time, the only use of address
   3747 			 * ranges is for IP multicast routing, for which the
   3748 			 * range is big enough to require all bits set.)
   3749 			 */
   3750 			goto allmulti;
   3751 		}
   3752 
   3753 		hash = wm_mchash(sc, enm->enm_addrlo);
   3754 
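         		/*
         		 * The upper hash bits select a 32-bit MTA register
         		 * (MTA[hash >> 5]); the low 5 bits select the bit
         		 * within it.  E.g. hash 0x341 sets bit 1 of MTA[26].
         		 */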
   3755 		reg = (hash >> 5);
   3756 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3757 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3758 		    || (sc->sc_type == WM_T_PCH2)
   3759 		    || (sc->sc_type == WM_T_PCH_LPT)
   3760 		    || (sc->sc_type == WM_T_PCH_SPT)
   3761 		    || (sc->sc_type == WM_T_PCH_CNP))
   3762 			reg &= 0x1f;
   3763 		else
   3764 			reg &= 0x7f;
   3765 		bit = hash & 0x1f;
   3766 
   3767 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3768 		hash |= 1U << bit;
   3769 
   3770 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3771 			/*
    3772 			 * 82544 Errata 9: Certain registers cannot be written
   3773 			 * with particular alignments in PCI-X bus operation
   3774 			 * (FCAH, MTA and VFTA).
   3775 			 */
   3776 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3777 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3778 			CSR_WRITE_FLUSH(sc);
   3779 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3780 			CSR_WRITE_FLUSH(sc);
   3781 		} else {
   3782 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3783 			CSR_WRITE_FLUSH(sc);
   3784 		}
   3785 
   3786 		ETHER_NEXT_MULTI(step, enm);
   3787 	}
   3788 	ETHER_UNLOCK(ec);
   3789 
   3790 	ifp->if_flags &= ~IFF_ALLMULTI;
   3791 	goto setit;
   3792 
   3793  allmulti:
   3794 	ifp->if_flags |= IFF_ALLMULTI;
   3795 	sc->sc_rctl |= RCTL_MPE;
   3796 
   3797  setit:
   3798 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3799 }
   3800 
   3801 /* Reset and init related */
   3802 
   3803 static void
   3804 wm_set_vlan(struct wm_softc *sc)
   3805 {
   3806 
   3807 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3808 		device_xname(sc->sc_dev), __func__));
   3809 
   3810 	/* Deal with VLAN enables. */
   3811 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3812 		sc->sc_ctrl |= CTRL_VME;
   3813 	else
   3814 		sc->sc_ctrl &= ~CTRL_VME;
   3815 
   3816 	/* Write the control registers. */
   3817 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3818 }
   3819 
   3820 static void
   3821 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3822 {
   3823 	uint32_t gcr;
   3824 	pcireg_t ctrl2;
   3825 
   3826 	gcr = CSR_READ(sc, WMREG_GCR);
   3827 
   3828 	/* Only take action if timeout value is defaulted to 0 */
   3829 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3830 		goto out;
   3831 
   3832 	if ((gcr & GCR_CAP_VER2) == 0) {
   3833 		gcr |= GCR_CMPL_TMOUT_10MS;
   3834 		goto out;
   3835 	}
   3836 
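         	/*
         	 * For devices with PCIe capability version 2, set a 16ms
         	 * completion timeout through the Device Control 2 register
         	 * instead.
         	 */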
   3837 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3838 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3839 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3840 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3841 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3842 
   3843 out:
   3844 	/* Disable completion timeout resend */
   3845 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3846 
   3847 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3848 }
   3849 
   3850 void
   3851 wm_get_auto_rd_done(struct wm_softc *sc)
   3852 {
   3853 	int i;
   3854 
    3855 	/* Wait for eeprom to reload */
   3856 	switch (sc->sc_type) {
   3857 	case WM_T_82571:
   3858 	case WM_T_82572:
   3859 	case WM_T_82573:
   3860 	case WM_T_82574:
   3861 	case WM_T_82583:
   3862 	case WM_T_82575:
   3863 	case WM_T_82576:
   3864 	case WM_T_82580:
   3865 	case WM_T_I350:
   3866 	case WM_T_I354:
   3867 	case WM_T_I210:
   3868 	case WM_T_I211:
   3869 	case WM_T_80003:
   3870 	case WM_T_ICH8:
   3871 	case WM_T_ICH9:
   3872 		for (i = 0; i < 10; i++) {
   3873 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3874 				break;
   3875 			delay(1000);
   3876 		}
   3877 		if (i == 10) {
   3878 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3879 			    "complete\n", device_xname(sc->sc_dev));
   3880 		}
   3881 		break;
   3882 	default:
   3883 		break;
   3884 	}
   3885 }
   3886 
   3887 void
   3888 wm_lan_init_done(struct wm_softc *sc)
   3889 {
   3890 	uint32_t reg = 0;
   3891 	int i;
   3892 
   3893 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3894 		device_xname(sc->sc_dev), __func__));
   3895 
   3896 	/* Wait for eeprom to reload */
   3897 	switch (sc->sc_type) {
   3898 	case WM_T_ICH10:
   3899 	case WM_T_PCH:
   3900 	case WM_T_PCH2:
   3901 	case WM_T_PCH_LPT:
   3902 	case WM_T_PCH_SPT:
   3903 	case WM_T_PCH_CNP:
   3904 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3905 			reg = CSR_READ(sc, WMREG_STATUS);
   3906 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3907 				break;
   3908 			delay(100);
   3909 		}
   3910 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3911 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3912 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3913 		}
   3914 		break;
   3915 	default:
   3916 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3917 		    __func__);
   3918 		break;
   3919 	}
   3920 
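         	/* Acknowledge completion by clearing the LAN_INIT_DONE bit. */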
   3921 	reg &= ~STATUS_LAN_INIT_DONE;
   3922 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3923 }
   3924 
   3925 void
   3926 wm_get_cfg_done(struct wm_softc *sc)
   3927 {
   3928 	int mask;
   3929 	uint32_t reg;
   3930 	int i;
   3931 
   3932 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3933 		device_xname(sc->sc_dev), __func__));
   3934 
   3935 	/* Wait for eeprom to reload */
   3936 	switch (sc->sc_type) {
   3937 	case WM_T_82542_2_0:
   3938 	case WM_T_82542_2_1:
   3939 		/* null */
   3940 		break;
   3941 	case WM_T_82543:
   3942 	case WM_T_82544:
   3943 	case WM_T_82540:
   3944 	case WM_T_82545:
   3945 	case WM_T_82545_3:
   3946 	case WM_T_82546:
   3947 	case WM_T_82546_3:
   3948 	case WM_T_82541:
   3949 	case WM_T_82541_2:
   3950 	case WM_T_82547:
   3951 	case WM_T_82547_2:
   3952 	case WM_T_82573:
   3953 	case WM_T_82574:
   3954 	case WM_T_82583:
   3955 		/* generic */
   3956 		delay(10*1000);
   3957 		break;
   3958 	case WM_T_80003:
   3959 	case WM_T_82571:
   3960 	case WM_T_82572:
   3961 	case WM_T_82575:
   3962 	case WM_T_82576:
   3963 	case WM_T_82580:
   3964 	case WM_T_I350:
   3965 	case WM_T_I354:
   3966 	case WM_T_I210:
   3967 	case WM_T_I211:
   3968 		if (sc->sc_type == WM_T_82571) {
   3969 			/* Only 82571 shares port 0 */
   3970 			mask = EEMNGCTL_CFGDONE_0;
   3971 		} else
   3972 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
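         		/* Poll EEMNGCTL until the CFGDONE bit for this PCI function is set. */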
   3973 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3974 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3975 				break;
   3976 			delay(1000);
   3977 		}
   3978 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3979 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3980 				device_xname(sc->sc_dev), __func__));
   3981 		}
   3982 		break;
   3983 	case WM_T_ICH8:
   3984 	case WM_T_ICH9:
   3985 	case WM_T_ICH10:
   3986 	case WM_T_PCH:
   3987 	case WM_T_PCH2:
   3988 	case WM_T_PCH_LPT:
   3989 	case WM_T_PCH_SPT:
   3990 	case WM_T_PCH_CNP:
   3991 		delay(10*1000);
   3992 		if (sc->sc_type >= WM_T_ICH10)
   3993 			wm_lan_init_done(sc);
   3994 		else
   3995 			wm_get_auto_rd_done(sc);
   3996 
   3997 		/* Clear PHY Reset Asserted bit */
   3998 		reg = CSR_READ(sc, WMREG_STATUS);
   3999 		if ((reg & STATUS_PHYRA) != 0)
   4000 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   4001 		break;
   4002 	default:
   4003 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   4004 		    __func__);
   4005 		break;
   4006 	}
   4007 }
   4008 
   4009 void
   4010 wm_phy_post_reset(struct wm_softc *sc)
   4011 {
   4012 	uint32_t reg;
   4013 
   4014 	/* This function is only for ICH8 and newer. */
   4015 	if (sc->sc_type < WM_T_ICH8)
   4016 		return;
   4017 
   4018 	if (wm_phy_resetisblocked(sc)) {
   4019 		/* XXX */
   4020 		device_printf(sc->sc_dev, "PHY is blocked\n");
   4021 		return;
   4022 	}
   4023 
   4024 	/* Allow time for h/w to get to quiescent state after reset */
   4025 	delay(10*1000);
   4026 
   4027 	/* Perform any necessary post-reset workarounds */
   4028 	if (sc->sc_type == WM_T_PCH)
   4029 		wm_hv_phy_workarounds_ich8lan(sc);
   4030 	else if (sc->sc_type == WM_T_PCH2)
   4031 		wm_lv_phy_workarounds_ich8lan(sc);
   4032 
   4033 	/* Clear the host wakeup bit after lcd reset */
   4034 	if (sc->sc_type >= WM_T_PCH) {
   4035 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4036 		    BM_PORT_GEN_CFG);
   4037 		reg &= ~BM_WUC_HOST_WU_BIT;
   4038 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4039 		    BM_PORT_GEN_CFG, reg);
   4040 	}
   4041 
   4042 	/* Configure the LCD with the extended configuration region in NVM */
   4043 	wm_init_lcd_from_nvm(sc);
   4044 
   4045 	/* Configure the LCD with the OEM bits in NVM */
   4046 	wm_oem_bits_config_ich8lan(sc, true);
   4047 
   4048 	if (sc->sc_type == WM_T_PCH2) {
   4049 		/* Ungate automatic PHY configuration on non-managed 82579 */
   4050 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   4051 			delay(10 * 1000);
   4052 			wm_gate_hw_phy_config_ich8lan(sc, false);
   4053 		}
   4054 		/* XXX Set EEE LPI Update Timer to 200usec */
   4055 	}
   4056 }
   4057 
   4058 /* Only for PCH and newer */
   4059 static int
   4060 wm_write_smbus_addr(struct wm_softc *sc)
   4061 {
   4062 	uint32_t strap, freq;
   4063 	uint16_t phy_data;
   4064 	int rv;
   4065 
   4066 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4067 		device_xname(sc->sc_dev), __func__));
   4068 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   4069 
   4070 	strap = CSR_READ(sc, WMREG_STRAP);
   4071 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   4072 
   4073 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   4074 	if (rv != 0)
   4075 		return -1;
   4076 
   4077 	phy_data &= ~HV_SMB_ADDR_ADDR;
   4078 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   4079 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   4080 
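         	/*
         	 * STRAP_FREQ is 0 when no SMBus frequency was strapped;
         	 * otherwise bits 0 and 1 of (freq - 1) select the
         	 * FREQ_LOW/FREQ_HIGH PHY bits below.
         	 */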
   4081 	if (sc->sc_phytype == WMPHY_I217) {
   4082 		/* Restore SMBus frequency */
    4083 		if (freq--) {
   4084 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   4085 			    | HV_SMB_ADDR_FREQ_HIGH);
   4086 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   4087 			    HV_SMB_ADDR_FREQ_LOW);
   4088 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   4089 			    HV_SMB_ADDR_FREQ_HIGH);
   4090 		} else {
   4091 			DPRINTF(WM_DEBUG_INIT,
    4092 			    ("%s: %s: Unsupported SMB frequency in PHY\n",
   4093 				device_xname(sc->sc_dev), __func__));
   4094 		}
   4095 	}
   4096 
   4097 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   4098 	    phy_data);
   4099 }
   4100 
   4101 void
   4102 wm_init_lcd_from_nvm(struct wm_softc *sc)
   4103 {
   4104 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   4105 	uint16_t phy_page = 0;
   4106 
   4107 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4108 		device_xname(sc->sc_dev), __func__));
   4109 
   4110 	switch (sc->sc_type) {
   4111 	case WM_T_ICH8:
   4112 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   4113 		    || (sc->sc_phytype != WMPHY_IGP_3))
   4114 			return;
   4115 
   4116 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   4117 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   4118 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   4119 			break;
   4120 		}
   4121 		/* FALLTHROUGH */
   4122 	case WM_T_PCH:
   4123 	case WM_T_PCH2:
   4124 	case WM_T_PCH_LPT:
   4125 	case WM_T_PCH_SPT:
   4126 	case WM_T_PCH_CNP:
   4127 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   4128 		break;
   4129 	default:
   4130 		return;
   4131 	}
   4132 
   4133 	sc->phy.acquire(sc);
   4134 
   4135 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   4136 	if ((reg & sw_cfg_mask) == 0)
   4137 		goto release;
   4138 
   4139 	/*
   4140 	 * Make sure HW does not configure LCD from PHY extended configuration
   4141 	 * before SW configuration
   4142 	 */
   4143 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   4144 	if ((sc->sc_type < WM_T_PCH2)
   4145 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   4146 		goto release;
   4147 
   4148 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4149 		device_xname(sc->sc_dev), __func__));
    4150 	/* The EXT_CNF pointer is in DWORDs; convert to an NVM word address. */
   4151 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4152 
   4153 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4154 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4155 	if (cnf_size == 0)
   4156 		goto release;
   4157 
   4158 	if (((sc->sc_type == WM_T_PCH)
   4159 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4160 	    || (sc->sc_type > WM_T_PCH)) {
   4161 		/*
   4162 		 * HW configures the SMBus address and LEDs when the OEM and
   4163 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4164 		 * are cleared, SW will configure them instead.
   4165 		 */
   4166 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4167 			device_xname(sc->sc_dev), __func__));
   4168 		wm_write_smbus_addr(sc);
   4169 
   4170 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4171 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   4172 	}
   4173 
   4174 	/* Configure LCD from extended configuration region. */
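         	/*
         	 * Each entry is a (data, address) pair of NVM words.  A write
         	 * to the page-select register updates the page applied to
         	 * subsequent register addresses.
         	 */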
   4175 	for (i = 0; i < cnf_size; i++) {
   4176 		uint16_t reg_data, reg_addr;
   4177 
   4178 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4179 			goto release;
   4180 
   4181 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4182 			goto release;
   4183 
   4184 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4185 			phy_page = reg_data;
   4186 
   4187 		reg_addr &= IGPHY_MAXREGADDR;
   4188 		reg_addr |= phy_page;
   4189 
   4190 		KASSERT(sc->phy.writereg_locked != NULL);
   4191 		sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr, reg_data);
   4192 	}
   4193 
   4194 release:
   4195 	sc->phy.release(sc);
   4196 	return;
   4197 }
   4198 
   4199 /*
   4200  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4201  *  @sc:       pointer to the HW structure
    4202  *  @d0_state: true if entering D0, false if entering D3
   4203  *
   4204  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4205  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4206  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
   4207  */
   4208 int
   4209 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4210 {
   4211 	uint32_t mac_reg;
   4212 	uint16_t oem_reg;
   4213 	int rv;
   4214 
   4215 	if (sc->sc_type < WM_T_PCH)
   4216 		return 0;
   4217 
   4218 	rv = sc->phy.acquire(sc);
   4219 	if (rv != 0)
   4220 		return rv;
   4221 
   4222 	if (sc->sc_type == WM_T_PCH) {
   4223 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4224 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4225 			goto release;
   4226 	}
   4227 
   4228 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4229 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4230 		goto release;
   4231 
   4232 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4233 
   4234 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4235 	if (rv != 0)
   4236 		goto release;
   4237 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4238 
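         	/*
         	 * Mirror the GbE-disable and LPLU settings from PHY_CTRL into
         	 * the PHY's OEM bits; for D3 the non-D0a variants also apply.
         	 */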
   4239 	if (d0_state) {
   4240 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4241 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4242 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4243 			oem_reg |= HV_OEM_BITS_LPLU;
   4244 	} else {
   4245 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4246 		    != 0)
   4247 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4248 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4249 		    != 0)
   4250 			oem_reg |= HV_OEM_BITS_LPLU;
   4251 	}
   4252 
   4253 	/* Set Restart auto-neg to activate the bits */
   4254 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4255 	    && (wm_phy_resetisblocked(sc) == false))
   4256 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4257 
   4258 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4259 
   4260 release:
   4261 	sc->phy.release(sc);
   4262 
   4263 	return rv;
   4264 }
   4265 
   4266 /* Init hardware bits */
   4267 void
   4268 wm_initialize_hardware_bits(struct wm_softc *sc)
   4269 {
   4270 	uint32_t tarc0, tarc1, reg;
   4271 
   4272 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4273 		device_xname(sc->sc_dev), __func__));
   4274 
   4275 	/* For 82571 variant, 80003 and ICHs */
   4276 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4277 	    || (sc->sc_type >= WM_T_80003)) {
   4278 
   4279 		/* Transmit Descriptor Control 0 */
   4280 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4281 		reg |= TXDCTL_COUNT_DESC;
   4282 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4283 
   4284 		/* Transmit Descriptor Control 1 */
   4285 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4286 		reg |= TXDCTL_COUNT_DESC;
   4287 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4288 
   4289 		/* TARC0 */
   4290 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4291 		switch (sc->sc_type) {
   4292 		case WM_T_82571:
   4293 		case WM_T_82572:
   4294 		case WM_T_82573:
   4295 		case WM_T_82574:
   4296 		case WM_T_82583:
   4297 		case WM_T_80003:
   4298 			/* Clear bits 30..27 */
   4299 			tarc0 &= ~__BITS(30, 27);
   4300 			break;
   4301 		default:
   4302 			break;
   4303 		}
   4304 
   4305 		switch (sc->sc_type) {
   4306 		case WM_T_82571:
   4307 		case WM_T_82572:
   4308 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4309 
   4310 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4311 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4312 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4313 			/* 8257[12] Errata No.7 */
   4314 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   4315 
   4316 			/* TARC1 bit 28 */
   4317 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4318 				tarc1 &= ~__BIT(28);
   4319 			else
   4320 				tarc1 |= __BIT(28);
   4321 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4322 
   4323 			/*
   4324 			 * 8257[12] Errata No.13
    4325 			 * Disable Dynamic Clock Gating.
   4326 			 */
   4327 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4328 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4329 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4330 			break;
   4331 		case WM_T_82573:
   4332 		case WM_T_82574:
   4333 		case WM_T_82583:
   4334 			if ((sc->sc_type == WM_T_82574)
   4335 			    || (sc->sc_type == WM_T_82583))
   4336 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4337 
   4338 			/* Extended Device Control */
   4339 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4340 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4341 			reg |= __BIT(22);	/* Set bit 22 */
   4342 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4343 
   4344 			/* Device Control */
   4345 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4346 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4347 
   4348 			/* PCIe Control Register */
   4349 			/*
   4350 			 * 82573 Errata (unknown).
   4351 			 *
   4352 			 * 82574 Errata 25 and 82583 Errata 12
   4353 			 * "Dropped Rx Packets":
    4354 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4355 			 */
   4356 			reg = CSR_READ(sc, WMREG_GCR);
   4357 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4358 			CSR_WRITE(sc, WMREG_GCR, reg);
   4359 
   4360 			if ((sc->sc_type == WM_T_82574)
   4361 			    || (sc->sc_type == WM_T_82583)) {
   4362 				/*
   4363 				 * Document says this bit must be set for
   4364 				 * proper operation.
   4365 				 */
   4366 				reg = CSR_READ(sc, WMREG_GCR);
   4367 				reg |= __BIT(22);
   4368 				CSR_WRITE(sc, WMREG_GCR, reg);
   4369 
   4370 				/*
    4371 				 * Apply a workaround for a hardware erratum
    4372 				 * documented in the errata docs.  It fixes an
    4373 				 * issue where some error-prone or unreliable
    4374 				 * PCIe completions occur, particularly with
    4375 				 * ASPM enabled.  Without the fix, the issue
    4376 				 * can cause Tx timeouts.
   4377 				 */
   4378 				reg = CSR_READ(sc, WMREG_GCR2);
   4379 				reg |= __BIT(0);
   4380 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4381 			}
   4382 			break;
   4383 		case WM_T_80003:
   4384 			/* TARC0 */
   4385 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4386 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4387 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   4388 
   4389 			/* TARC1 bit 28 */
   4390 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4391 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4392 				tarc1 &= ~__BIT(28);
   4393 			else
   4394 				tarc1 |= __BIT(28);
   4395 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4396 			break;
   4397 		case WM_T_ICH8:
   4398 		case WM_T_ICH9:
   4399 		case WM_T_ICH10:
   4400 		case WM_T_PCH:
   4401 		case WM_T_PCH2:
   4402 		case WM_T_PCH_LPT:
   4403 		case WM_T_PCH_SPT:
   4404 		case WM_T_PCH_CNP:
   4405 			/* TARC0 */
   4406 			if (sc->sc_type == WM_T_ICH8) {
   4407 				/* Set TARC0 bits 29 and 28 */
   4408 				tarc0 |= __BITS(29, 28);
   4409 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4410 				tarc0 |= __BIT(29);
   4411 				/*
   4412 				 *  Drop bit 28. From Linux.
   4413 				 * See I218/I219 spec update
   4414 				 * "5. Buffer Overrun While the I219 is
   4415 				 * Processing DMA Transactions"
   4416 				 */
   4417 				tarc0 &= ~__BIT(28);
   4418 			}
   4419 			/* Set TARC0 bits 23,24,26,27 */
   4420 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4421 
   4422 			/* CTRL_EXT */
   4423 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4424 			reg |= __BIT(22);	/* Set bit 22 */
   4425 			/*
   4426 			 * Enable PHY low-power state when MAC is at D3
   4427 			 * w/o WoL
   4428 			 */
   4429 			if (sc->sc_type >= WM_T_PCH)
   4430 				reg |= CTRL_EXT_PHYPDEN;
   4431 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4432 
   4433 			/* TARC1 */
   4434 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4435 			/* bit 28 */
   4436 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4437 				tarc1 &= ~__BIT(28);
   4438 			else
   4439 				tarc1 |= __BIT(28);
   4440 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4441 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4442 
   4443 			/* Device Status */
   4444 			if (sc->sc_type == WM_T_ICH8) {
   4445 				reg = CSR_READ(sc, WMREG_STATUS);
   4446 				reg &= ~__BIT(31);
   4447 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4448 
   4449 			}
   4450 
   4451 			/* IOSFPC */
   4452 			if (sc->sc_type == WM_T_PCH_SPT) {
   4453 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4454 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4455 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4456 			}
   4457 			/*
   4458 			 * Work-around descriptor data corruption issue during
   4459 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4460 			 * capability.
   4461 			 */
   4462 			reg = CSR_READ(sc, WMREG_RFCTL);
   4463 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4464 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4465 			break;
   4466 		default:
   4467 			break;
   4468 		}
   4469 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4470 
   4471 		switch (sc->sc_type) {
   4472 		/*
   4473 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4474 		 * Avoid RSS Hash Value bug.
   4475 		 */
   4476 		case WM_T_82571:
   4477 		case WM_T_82572:
   4478 		case WM_T_82573:
   4479 		case WM_T_80003:
   4480 		case WM_T_ICH8:
   4481 			reg = CSR_READ(sc, WMREG_RFCTL);
    4482 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4483 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4484 			break;
   4485 		case WM_T_82574:
    4486 			/* Use extended Rx descriptors. */
   4487 			reg = CSR_READ(sc, WMREG_RFCTL);
   4488 			reg |= WMREG_RFCTL_EXSTEN;
   4489 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4490 			break;
   4491 		default:
   4492 			break;
   4493 		}
   4494 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4495 		/*
   4496 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4497 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4498 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4499 		 * Correctly by the Device"
   4500 		 *
   4501 		 * I354(C2000) Errata AVR53:
   4502 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4503 		 * Hang"
   4504 		 */
   4505 		reg = CSR_READ(sc, WMREG_RFCTL);
   4506 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4507 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4508 	}
   4509 }
   4510 
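         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Map the encoded RXPBS size field to its real size using the
          *	82580 lookup table. Out-of-range encodings map to 0.
          */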
   4511 static uint32_t
   4512 wm_rxpbs_adjust_82580(uint32_t val)
   4513 {
   4514 	uint32_t rv = 0;
   4515 
   4516 	if (val < __arraycount(wm_82580_rxpbs_table))
   4517 		rv = wm_82580_rxpbs_table[val];
   4518 
   4519 	return rv;
   4520 }
   4521 
   4522 /*
   4523  * wm_reset_phy:
   4524  *
   4525  *	generic PHY reset function.
   4526  *	Same as e1000_phy_hw_reset_generic()
   4527  */
   4528 static int
   4529 wm_reset_phy(struct wm_softc *sc)
   4530 {
   4531 	uint32_t reg;
   4532 
   4533 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4534 		device_xname(sc->sc_dev), __func__));
   4535 	if (wm_phy_resetisblocked(sc))
   4536 		return -1;
   4537 
   4538 	sc->phy.acquire(sc);
   4539 
   4540 	reg = CSR_READ(sc, WMREG_CTRL);
   4541 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4542 	CSR_WRITE_FLUSH(sc);
   4543 
   4544 	delay(sc->phy.reset_delay_us);
   4545 
   4546 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4547 	CSR_WRITE_FLUSH(sc);
   4548 
   4549 	delay(150);
   4550 
   4551 	sc->phy.release(sc);
   4552 
   4553 	wm_get_cfg_done(sc);
   4554 	wm_phy_post_reset(sc);
   4555 
   4556 	return 0;
   4557 }
   4558 
   4559 /*
   4560  * Only used by WM_T_PCH_SPT which does not use multiqueue,
    4561  * so it is enough to check only sc->sc_queue[0].
   4562  */
   4563 static void
   4564 wm_flush_desc_rings(struct wm_softc *sc)
   4565 {
   4566 	pcireg_t preg;
   4567 	uint32_t reg;
   4568 	struct wm_txqueue *txq;
   4569 	wiseman_txdesc_t *txd;
   4570 	int nexttx;
   4571 	uint32_t rctl;
   4572 
   4573 	/* First, disable MULR fix in FEXTNVM11 */
   4574 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4575 	reg |= FEXTNVM11_DIS_MULRFIX;
   4576 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4577 
   4578 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4579 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4580 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4581 		return;
   4582 
   4583 	/* TX */
   4584 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4585 	    device_xname(sc->sc_dev), preg, reg);
   4586 	reg = CSR_READ(sc, WMREG_TCTL);
   4587 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4588 
   4589 	txq = &sc->sc_queue[0].wmq_txq;
   4590 	nexttx = txq->txq_next;
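         	/*
         	 * Build one dummy 512-byte TX descriptor: the low bits of
         	 * wtx_cmdlen carry the length and WTX_CMD_IFCS asks the MAC
         	 * to append the FCS.
         	 */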
   4591 	txd = &txq->txq_descs[nexttx];
   4592 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4593 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4594 	txd->wtx_fields.wtxu_status = 0;
   4595 	txd->wtx_fields.wtxu_options = 0;
   4596 	txd->wtx_fields.wtxu_vlan = 0;
   4597 
   4598 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4599 	    BUS_SPACE_BARRIER_WRITE);
   4600 
   4601 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4602 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4603 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4604 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4605 	delay(250);
   4606 
   4607 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4608 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4609 		return;
   4610 
   4611 	/* RX */
   4612 	printf("%s: Need RX flush (reg = %08x)\n",
   4613 	    device_xname(sc->sc_dev), preg);
   4614 	rctl = CSR_READ(sc, WMREG_RCTL);
   4615 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4616 	CSR_WRITE_FLUSH(sc);
   4617 	delay(150);
   4618 
   4619 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4620 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4621 	reg &= 0xffffc000;
   4622 	/*
   4623 	 * update thresholds: prefetch threshold to 31, host threshold
   4624 	 * to 1 and make sure the granularity is "descriptors" and not
   4625 	 * "cache lines"
   4626 	 */
   4627 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
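         	/*
         	 * That is, prefetch threshold = 0x1f in the low bits and host
         	 * threshold = 1 starting at bit 8 (the PTHRESH/HTHRESH fields
         	 * in Intel's RXDCTL layout), matching the comment above.
         	 */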
   4628 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4629 
   4630 	/*
   4631 	 * momentarily enable the RX ring for the changes to take
   4632 	 * effect
   4633 	 */
   4634 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4635 	CSR_WRITE_FLUSH(sc);
   4636 	delay(150);
   4637 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4638 }
   4639 
   4640 /*
   4641  * wm_reset:
   4642  *
   4643  *	Reset the i82542 chip.
   4644  */
   4645 static void
   4646 wm_reset(struct wm_softc *sc)
   4647 {
   4648 	int phy_reset = 0;
   4649 	int i, error = 0;
   4650 	uint32_t reg;
   4651 	uint16_t kmreg;
   4652 	int rv;
   4653 
   4654 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4655 		device_xname(sc->sc_dev), __func__));
   4656 	KASSERT(sc->sc_type != 0);
   4657 
   4658 	/*
   4659 	 * Allocate on-chip memory according to the MTU size.
   4660 	 * The Packet Buffer Allocation register must be written
   4661 	 * before the chip is reset.
   4662 	 */
   4663 	switch (sc->sc_type) {
   4664 	case WM_T_82547:
   4665 	case WM_T_82547_2:
   4666 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4667 		    PBA_22K : PBA_30K;
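         		/*
         		 * The 40KB packet buffer is shared between the RX area
         		 * (sc_pba) and the TX FIFO, so e.g. PBA_30K leaves 10KB
         		 * of TX FIFO (see the txq_fifo_size calculation below).
         		 */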
   4668 		for (i = 0; i < sc->sc_nqueues; i++) {
   4669 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4670 			txq->txq_fifo_head = 0;
   4671 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4672 			txq->txq_fifo_size =
   4673 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4674 			txq->txq_fifo_stall = 0;
   4675 		}
   4676 		break;
   4677 	case WM_T_82571:
   4678 	case WM_T_82572:
   4679 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4680 	case WM_T_80003:
   4681 		sc->sc_pba = PBA_32K;
   4682 		break;
   4683 	case WM_T_82573:
   4684 		sc->sc_pba = PBA_12K;
   4685 		break;
   4686 	case WM_T_82574:
   4687 	case WM_T_82583:
   4688 		sc->sc_pba = PBA_20K;
   4689 		break;
   4690 	case WM_T_82576:
   4691 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4692 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4693 		break;
   4694 	case WM_T_82580:
   4695 	case WM_T_I350:
   4696 	case WM_T_I354:
   4697 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4698 		break;
   4699 	case WM_T_I210:
   4700 	case WM_T_I211:
   4701 		sc->sc_pba = PBA_34K;
   4702 		break;
   4703 	case WM_T_ICH8:
   4704 		/* Workaround for a bit corruption issue in FIFO memory */
   4705 		sc->sc_pba = PBA_8K;
   4706 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4707 		break;
   4708 	case WM_T_ICH9:
   4709 	case WM_T_ICH10:
   4710 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4711 		    PBA_14K : PBA_10K;
   4712 		break;
   4713 	case WM_T_PCH:
   4714 	case WM_T_PCH2:	/* XXX 14K? */
   4715 	case WM_T_PCH_LPT:
   4716 	case WM_T_PCH_SPT:
   4717 	case WM_T_PCH_CNP:
   4718 		sc->sc_pba = PBA_26K;
   4719 		break;
   4720 	default:
   4721 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4722 		    PBA_40K : PBA_48K;
   4723 		break;
   4724 	}
   4725 	/*
   4726 	 * Only old or non-multiqueue devices have the PBA register
   4727 	 * XXX Need special handling for 82575.
   4728 	 */
   4729 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4730 	    || (sc->sc_type == WM_T_82575))
   4731 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4732 
   4733 	/* Prevent the PCI-E bus from sticking */
   4734 	if (sc->sc_flags & WM_F_PCIE) {
   4735 		int timeout = 800;
   4736 
   4737 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4738 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4739 
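         		/* Poll up to 800 * 100us = 80ms for master requests to stop. */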
   4740 		while (timeout--) {
   4741 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4742 			    == 0)
   4743 				break;
   4744 			delay(100);
   4745 		}
   4746 		if (timeout == 0)
   4747 			device_printf(sc->sc_dev,
   4748 			    "failed to disable busmastering\n");
   4749 	}
   4750 
   4751 	/* Set the completion timeout for interface */
   4752 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4753 	    || (sc->sc_type == WM_T_82580)
   4754 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4755 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4756 		wm_set_pcie_completion_timeout(sc);
   4757 
   4758 	/* Clear interrupt */
   4759 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4760 	if (wm_is_using_msix(sc)) {
   4761 		if (sc->sc_type != WM_T_82574) {
   4762 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4763 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4764 		} else
   4765 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4766 	}
   4767 
   4768 	/* Stop the transmit and receive processes. */
   4769 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4770 	sc->sc_rctl &= ~RCTL_EN;
   4771 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4772 	CSR_WRITE_FLUSH(sc);
   4773 
   4774 	/* XXX set_tbi_sbp_82543() */
   4775 
   4776 	delay(10*1000);
   4777 
   4778 	/* Must acquire the MDIO ownership before MAC reset */
   4779 	switch (sc->sc_type) {
   4780 	case WM_T_82573:
   4781 	case WM_T_82574:
   4782 	case WM_T_82583:
   4783 		error = wm_get_hw_semaphore_82573(sc);
   4784 		break;
   4785 	default:
   4786 		break;
   4787 	}
   4788 
   4789 	/*
   4790 	 * 82541 Errata 29? & 82547 Errata 28?
    4791 	 * See also the description of the PHY_RST bit in the CTRL register
   4792 	 * in 8254x_GBe_SDM.pdf.
   4793 	 */
   4794 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4795 		CSR_WRITE(sc, WMREG_CTRL,
   4796 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4797 		CSR_WRITE_FLUSH(sc);
   4798 		delay(5000);
   4799 	}
   4800 
   4801 	switch (sc->sc_type) {
   4802 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4803 	case WM_T_82541:
   4804 	case WM_T_82541_2:
   4805 	case WM_T_82547:
   4806 	case WM_T_82547_2:
   4807 		/*
   4808 		 * On some chipsets, a reset through a memory-mapped write
   4809 		 * cycle can cause the chip to reset before completing the
    4810 		 * write cycle. This causes a major headache that can be avoided
   4811 		 * by issuing the reset via indirect register writes through
   4812 		 * I/O space.
   4813 		 *
   4814 		 * So, if we successfully mapped the I/O BAR at attach time,
   4815 		 * use that. Otherwise, try our luck with a memory-mapped
   4816 		 * reset.
   4817 		 */
   4818 		if (sc->sc_flags & WM_F_IOH_VALID)
   4819 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4820 		else
   4821 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4822 		break;
   4823 	case WM_T_82545_3:
   4824 	case WM_T_82546_3:
   4825 		/* Use the shadow control register on these chips. */
   4826 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4827 		break;
   4828 	case WM_T_80003:
   4829 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4830 		sc->phy.acquire(sc);
   4831 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4832 		sc->phy.release(sc);
   4833 		break;
   4834 	case WM_T_ICH8:
   4835 	case WM_T_ICH9:
   4836 	case WM_T_ICH10:
   4837 	case WM_T_PCH:
   4838 	case WM_T_PCH2:
   4839 	case WM_T_PCH_LPT:
   4840 	case WM_T_PCH_SPT:
   4841 	case WM_T_PCH_CNP:
   4842 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4843 		if (wm_phy_resetisblocked(sc) == false) {
   4844 			/*
   4845 			 * Gate automatic PHY configuration by hardware on
   4846 			 * non-managed 82579
   4847 			 */
   4848 			if ((sc->sc_type == WM_T_PCH2)
   4849 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4850 				== 0))
   4851 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4852 
   4853 			reg |= CTRL_PHY_RESET;
   4854 			phy_reset = 1;
   4855 		} else
   4856 			printf("XXX reset is blocked!!!\n");
   4857 		sc->phy.acquire(sc);
   4858 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4859 		/* Don't insert a completion barrier while resetting */
   4860 		delay(20*1000);
   4861 		mutex_exit(sc->sc_ich_phymtx);
   4862 		break;
   4863 	case WM_T_82580:
   4864 	case WM_T_I350:
   4865 	case WM_T_I354:
   4866 	case WM_T_I210:
   4867 	case WM_T_I211:
   4868 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4869 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4870 			CSR_WRITE_FLUSH(sc);
   4871 		delay(5000);
   4872 		break;
   4873 	case WM_T_82542_2_0:
   4874 	case WM_T_82542_2_1:
   4875 	case WM_T_82543:
   4876 	case WM_T_82540:
   4877 	case WM_T_82545:
   4878 	case WM_T_82546:
   4879 	case WM_T_82571:
   4880 	case WM_T_82572:
   4881 	case WM_T_82573:
   4882 	case WM_T_82574:
   4883 	case WM_T_82575:
   4884 	case WM_T_82576:
   4885 	case WM_T_82583:
   4886 	default:
   4887 		/* Everything else can safely use the documented method. */
   4888 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4889 		break;
   4890 	}
   4891 
   4892 	/* Must release the MDIO ownership after MAC reset */
   4893 	switch (sc->sc_type) {
   4894 	case WM_T_82573:
   4895 	case WM_T_82574:
   4896 	case WM_T_82583:
   4897 		if (error == 0)
   4898 			wm_put_hw_semaphore_82573(sc);
   4899 		break;
   4900 	default:
   4901 		break;
   4902 	}
   4903 
   4904 	/* Set Phy Config Counter to 50msec */
   4905 	if (sc->sc_type == WM_T_PCH2) {
   4906 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4907 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4908 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4909 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4910 	}
   4911 
   4912 	if (phy_reset != 0)
   4913 		wm_get_cfg_done(sc);
   4914 
   4915 	/* reload EEPROM */
   4916 	switch (sc->sc_type) {
   4917 	case WM_T_82542_2_0:
   4918 	case WM_T_82542_2_1:
   4919 	case WM_T_82543:
   4920 	case WM_T_82544:
   4921 		delay(10);
   4922 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4923 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4924 		CSR_WRITE_FLUSH(sc);
   4925 		delay(2000);
   4926 		break;
   4927 	case WM_T_82540:
   4928 	case WM_T_82545:
   4929 	case WM_T_82545_3:
   4930 	case WM_T_82546:
   4931 	case WM_T_82546_3:
   4932 		delay(5*1000);
   4933 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4934 		break;
   4935 	case WM_T_82541:
   4936 	case WM_T_82541_2:
   4937 	case WM_T_82547:
   4938 	case WM_T_82547_2:
   4939 		delay(20000);
   4940 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4941 		break;
   4942 	case WM_T_82571:
   4943 	case WM_T_82572:
   4944 	case WM_T_82573:
   4945 	case WM_T_82574:
   4946 	case WM_T_82583:
   4947 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4948 			delay(10);
   4949 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4950 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4951 			CSR_WRITE_FLUSH(sc);
   4952 		}
   4953 		/* check EECD_EE_AUTORD */
   4954 		wm_get_auto_rd_done(sc);
   4955 		/*
   4956 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4957 		 * is set.
   4958 		 */
   4959 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4960 		    || (sc->sc_type == WM_T_82583))
   4961 			delay(25*1000);
   4962 		break;
   4963 	case WM_T_82575:
   4964 	case WM_T_82576:
   4965 	case WM_T_82580:
   4966 	case WM_T_I350:
   4967 	case WM_T_I354:
   4968 	case WM_T_I210:
   4969 	case WM_T_I211:
   4970 	case WM_T_80003:
   4971 		/* check EECD_EE_AUTORD */
   4972 		wm_get_auto_rd_done(sc);
   4973 		break;
   4974 	case WM_T_ICH8:
   4975 	case WM_T_ICH9:
   4976 	case WM_T_ICH10:
   4977 	case WM_T_PCH:
   4978 	case WM_T_PCH2:
   4979 	case WM_T_PCH_LPT:
   4980 	case WM_T_PCH_SPT:
   4981 	case WM_T_PCH_CNP:
   4982 		break;
   4983 	default:
   4984 		panic("%s: unknown type\n", __func__);
   4985 	}
   4986 
   4987 	/* Check whether EEPROM is present or not */
   4988 	switch (sc->sc_type) {
   4989 	case WM_T_82575:
   4990 	case WM_T_82576:
   4991 	case WM_T_82580:
   4992 	case WM_T_I350:
   4993 	case WM_T_I354:
   4994 	case WM_T_ICH8:
   4995 	case WM_T_ICH9:
   4996 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4997 			/* Not found */
   4998 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4999 			if (sc->sc_type == WM_T_82575)
   5000 				wm_reset_init_script_82575(sc);
   5001 		}
   5002 		break;
   5003 	default:
   5004 		break;
   5005 	}
   5006 
   5007 	if (phy_reset != 0)
   5008 		wm_phy_post_reset(sc);
   5009 
   5010 	if ((sc->sc_type == WM_T_82580)
   5011 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   5012 		/* clear global device reset status bit */
   5013 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   5014 	}
   5015 
   5016 	/* Clear any pending interrupt events. */
   5017 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5018 	reg = CSR_READ(sc, WMREG_ICR);
   5019 	if (wm_is_using_msix(sc)) {
   5020 		if (sc->sc_type != WM_T_82574) {
   5021 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5022 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5023 		} else
   5024 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5025 	}
   5026 
   5027 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5028 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5029 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5030 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   5031 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5032 		reg |= KABGTXD_BGSQLBIAS;
   5033 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5034 	}
   5035 
   5036 	/* reload sc_ctrl */
   5037 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   5038 
   5039 	wm_set_eee(sc);
   5040 
   5041 	/*
   5042 	 * For PCH, this write will make sure that any noise will be detected
   5043 	 * as a CRC error and be dropped rather than show up as a bad packet
    5044 	 * to the DMA engine.
   5045 	 */
   5046 	if (sc->sc_type == WM_T_PCH)
   5047 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   5048 
   5049 	if (sc->sc_type >= WM_T_82544)
   5050 		CSR_WRITE(sc, WMREG_WUC, 0);
   5051 
   5052 	if (sc->sc_type < WM_T_82575)
   5053 		wm_disable_aspm(sc); /* Workaround for some chips */
   5054 
   5055 	wm_reset_mdicnfg_82580(sc);
   5056 
   5057 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   5058 		wm_pll_workaround_i210(sc);
   5059 
   5060 	if (sc->sc_type == WM_T_80003) {
   5061 		/* default to TRUE to enable the MDIC W/A */
   5062 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   5063 
   5064 		rv = wm_kmrn_readreg(sc,
   5065 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   5066 		if (rv == 0) {
   5067 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   5068 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   5069 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   5070 			else
   5071 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   5072 		}
   5073 	}
   5074 }
   5075 
   5076 /*
   5077  * wm_add_rxbuf:
   5078  *
    5079  *	Add a receive buffer to the indicated descriptor.
   5080  */
   5081 static int
   5082 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   5083 {
   5084 	struct wm_softc *sc = rxq->rxq_sc;
   5085 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   5086 	struct mbuf *m;
   5087 	int error;
   5088 
   5089 	KASSERT(mutex_owned(rxq->rxq_lock));
   5090 
   5091 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   5092 	if (m == NULL)
   5093 		return ENOBUFS;
   5094 
   5095 	MCLGET(m, M_DONTWAIT);
   5096 	if ((m->m_flags & M_EXT) == 0) {
   5097 		m_freem(m);
   5098 		return ENOBUFS;
   5099 	}
   5100 
   5101 	if (rxs->rxs_mbuf != NULL)
   5102 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5103 
   5104 	rxs->rxs_mbuf = m;
   5105 
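         	/* Offer the whole cluster so a full-sized frame fits. */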
   5106 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   5107 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   5108 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   5109 	if (error) {
   5110 		/* XXX XXX XXX */
   5111 		aprint_error_dev(sc->sc_dev,
   5112 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   5113 		panic("wm_add_rxbuf");
   5114 	}
   5115 
   5116 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   5117 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   5118 
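         	/*
         	 * On NEWQUEUE chips the descriptor may only be handed to the
         	 * hardware while RCTL.EN is set; see the RX descriptor note
         	 * in wm_init_locked().
         	 */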
   5119 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5120 		if ((sc->sc_rctl & RCTL_EN) != 0)
   5121 			wm_init_rxdesc(rxq, idx);
   5122 	} else
   5123 		wm_init_rxdesc(rxq, idx);
   5124 
   5125 	return 0;
   5126 }
   5127 
   5128 /*
   5129  * wm_rxdrain:
   5130  *
   5131  *	Drain the receive queue.
   5132  */
   5133 static void
   5134 wm_rxdrain(struct wm_rxqueue *rxq)
   5135 {
   5136 	struct wm_softc *sc = rxq->rxq_sc;
   5137 	struct wm_rxsoft *rxs;
   5138 	int i;
   5139 
   5140 	KASSERT(mutex_owned(rxq->rxq_lock));
   5141 
   5142 	for (i = 0; i < WM_NRXDESC; i++) {
   5143 		rxs = &rxq->rxq_soft[i];
   5144 		if (rxs->rxs_mbuf != NULL) {
   5145 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5146 			m_freem(rxs->rxs_mbuf);
   5147 			rxs->rxs_mbuf = NULL;
   5148 		}
   5149 	}
   5150 }
   5151 
   5152 /*
   5153  * Setup registers for RSS.
   5154  *
   5155  * XXX not yet VMDq support
   5156  */
   5157 static void
   5158 wm_init_rss(struct wm_softc *sc)
   5159 {
   5160 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5161 	int i;
   5162 
   5163 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5164 
   5165 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5166 		int qid, reta_ent;
   5167 
   5168 		qid  = i % sc->sc_nqueues;
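         		/* Spread the RETA entries round-robin over the queues. */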
   5169 		switch (sc->sc_type) {
   5170 		case WM_T_82574:
   5171 			reta_ent = __SHIFTIN(qid,
   5172 			    RETA_ENT_QINDEX_MASK_82574);
   5173 			break;
   5174 		case WM_T_82575:
   5175 			reta_ent = __SHIFTIN(qid,
   5176 			    RETA_ENT_QINDEX1_MASK_82575);
   5177 			break;
   5178 		default:
   5179 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5180 			break;
   5181 		}
   5182 
   5183 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5184 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5185 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5186 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5187 	}
   5188 
   5189 	rss_getkey((uint8_t *)rss_key);
   5190 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5191 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5192 
   5193 	if (sc->sc_type == WM_T_82574)
   5194 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5195 	else
   5196 		mrqc = MRQC_ENABLE_RSS_MQ;
   5197 
   5198 	/*
   5199 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5200 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5201 	 */
   5202 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5203 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5204 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5205 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5206 
   5207 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5208 }
   5209 
   5210 /*
    5211  * Adjust the numbers of TX and RX queues which the system actually uses.
    5212  *
    5213  * The numbers are affected by the following parameters:
    5214  *     - The number of hardware queues
   5215  *     - The number of MSI-X vectors (= "nvectors" argument)
   5216  *     - ncpu
   5217  */
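         /*
          * For example, an 82576 exposes 16 queue pairs, but with
          * nvectors = 5 only four Tx/Rx vectors remain after one is
          * reserved for the link interrupt, so sc_nqueues becomes 4
          * (and is then clamped to ncpu on smaller machines).
          */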
   5218 static void
   5219 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5220 {
   5221 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5222 
   5223 	if (nvectors < 2) {
   5224 		sc->sc_nqueues = 1;
   5225 		return;
   5226 	}
   5227 
   5228 	switch (sc->sc_type) {
   5229 	case WM_T_82572:
   5230 		hw_ntxqueues = 2;
   5231 		hw_nrxqueues = 2;
   5232 		break;
   5233 	case WM_T_82574:
   5234 		hw_ntxqueues = 2;
   5235 		hw_nrxqueues = 2;
   5236 		break;
   5237 	case WM_T_82575:
   5238 		hw_ntxqueues = 4;
   5239 		hw_nrxqueues = 4;
   5240 		break;
   5241 	case WM_T_82576:
   5242 		hw_ntxqueues = 16;
   5243 		hw_nrxqueues = 16;
   5244 		break;
   5245 	case WM_T_82580:
   5246 	case WM_T_I350:
   5247 	case WM_T_I354:
   5248 		hw_ntxqueues = 8;
   5249 		hw_nrxqueues = 8;
   5250 		break;
   5251 	case WM_T_I210:
   5252 		hw_ntxqueues = 4;
   5253 		hw_nrxqueues = 4;
   5254 		break;
   5255 	case WM_T_I211:
   5256 		hw_ntxqueues = 2;
   5257 		hw_nrxqueues = 2;
   5258 		break;
   5259 		/*
    5260 		 * As the ethernet controllers below do not support MSI-X,
    5261 		 * this driver does not use multiqueue on them.
   5262 		 *     - WM_T_80003
   5263 		 *     - WM_T_ICH8
   5264 		 *     - WM_T_ICH9
   5265 		 *     - WM_T_ICH10
   5266 		 *     - WM_T_PCH
   5267 		 *     - WM_T_PCH2
   5268 		 *     - WM_T_PCH_LPT
   5269 		 */
   5270 	default:
   5271 		hw_ntxqueues = 1;
   5272 		hw_nrxqueues = 1;
   5273 		break;
   5274 	}
   5275 
   5276 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5277 
   5278 	/*
    5279 	 * Since more queues than MSI-X vectors cannot improve scaling, we
    5280 	 * limit the number of queues actually used.
   5281 	 */
   5282 	if (nvectors < hw_nqueues + 1)
   5283 		sc->sc_nqueues = nvectors - 1;
   5284 	else
   5285 		sc->sc_nqueues = hw_nqueues;
   5286 
   5287 	/*
    5288 	 * Since more queues than CPUs cannot improve scaling, we limit
    5289 	 * the number of queues actually used.
   5290 	 */
   5291 	if (ncpu < sc->sc_nqueues)
   5292 		sc->sc_nqueues = ncpu;
   5293 }
   5294 
   5295 static inline bool
   5296 wm_is_using_msix(struct wm_softc *sc)
   5297 {
   5298 
   5299 	return (sc->sc_nintrs > 1);
   5300 }
   5301 
   5302 static inline bool
   5303 wm_is_using_multiqueue(struct wm_softc *sc)
   5304 {
   5305 
   5306 	return (sc->sc_nqueues > 1);
   5307 }
   5308 
   5309 static int
   5310 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5311 {
   5312 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5313 	wmq->wmq_id = qidx;
   5314 	wmq->wmq_intr_idx = intr_idx;
   5315 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5316 #ifdef WM_MPSAFE
   5317 	    | SOFTINT_MPSAFE
   5318 #endif
   5319 	    , wm_handle_queue, wmq);
   5320 	if (wmq->wmq_si != NULL)
   5321 		return 0;
   5322 
   5323 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5324 	    wmq->wmq_id);
   5325 
   5326 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5327 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5328 	return ENOMEM;
   5329 }
   5330 
   5331 /*
   5332  * Both single interrupt MSI and INTx can use this function.
   5333  */
   5334 static int
   5335 wm_setup_legacy(struct wm_softc *sc)
   5336 {
   5337 	pci_chipset_tag_t pc = sc->sc_pc;
   5338 	const char *intrstr = NULL;
   5339 	char intrbuf[PCI_INTRSTR_LEN];
   5340 	int error;
   5341 
   5342 	error = wm_alloc_txrx_queues(sc);
   5343 	if (error) {
   5344 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5345 		    error);
   5346 		return ENOMEM;
   5347 	}
   5348 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5349 	    sizeof(intrbuf));
   5350 #ifdef WM_MPSAFE
   5351 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5352 #endif
   5353 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5354 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5355 	if (sc->sc_ihs[0] == NULL) {
   5356 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5357 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5358 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5359 		return ENOMEM;
   5360 	}
   5361 
   5362 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5363 	sc->sc_nintrs = 1;
   5364 
   5365 	return wm_softint_establish(sc, 0, 0);
   5366 }
   5367 
   5368 static int
   5369 wm_setup_msix(struct wm_softc *sc)
   5370 {
   5371 	void *vih;
   5372 	kcpuset_t *affinity;
   5373 	int qidx, error, intr_idx, txrx_established;
   5374 	pci_chipset_tag_t pc = sc->sc_pc;
   5375 	const char *intrstr = NULL;
   5376 	char intrbuf[PCI_INTRSTR_LEN];
   5377 	char intr_xname[INTRDEVNAMEBUF];
   5378 
   5379 	if (sc->sc_nqueues < ncpu) {
   5380 		/*
   5381 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5382 		 * interrupts starts from CPU#1.
   5383 		 */
   5384 		sc->sc_affinity_offset = 1;
   5385 	} else {
   5386 		/*
    5387 		 * In this case, this device uses all CPUs. So, we align the
    5388 		 * affinity cpu_index with the MSI-X vector number for readability.
   5389 		 */
   5390 		sc->sc_affinity_offset = 0;
   5391 	}
   5392 
   5393 	error = wm_alloc_txrx_queues(sc);
   5394 	if (error) {
   5395 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5396 		    error);
   5397 		return ENOMEM;
   5398 	}
   5399 
   5400 	kcpuset_create(&affinity, false);
   5401 	intr_idx = 0;
   5402 
   5403 	/*
   5404 	 * TX and RX
   5405 	 */
   5406 	txrx_established = 0;
   5407 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5408 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5409 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
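         		/*
         		 * E.g. with offset 1, queue 0 lands on CPU 1, queue 1
         		 * on CPU 2, and so on, wrapping modulo ncpu.
         		 */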
   5410 
   5411 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5412 		    sizeof(intrbuf));
   5413 #ifdef WM_MPSAFE
   5414 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5415 		    PCI_INTR_MPSAFE, true);
   5416 #endif
   5417 		memset(intr_xname, 0, sizeof(intr_xname));
   5418 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5419 		    device_xname(sc->sc_dev), qidx);
   5420 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5421 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5422 		if (vih == NULL) {
   5423 			aprint_error_dev(sc->sc_dev,
   5424 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5425 			    intrstr ? " at " : "",
   5426 			    intrstr ? intrstr : "");
   5427 
   5428 			goto fail;
   5429 		}
   5430 		kcpuset_zero(affinity);
   5431 		/* Round-robin affinity */
   5432 		kcpuset_set(affinity, affinity_to);
   5433 		error = interrupt_distribute(vih, affinity, NULL);
   5434 		if (error == 0) {
   5435 			aprint_normal_dev(sc->sc_dev,
   5436 			    "for TX and RX interrupting at %s affinity to %u\n",
   5437 			    intrstr, affinity_to);
   5438 		} else {
   5439 			aprint_normal_dev(sc->sc_dev,
   5440 			    "for TX and RX interrupting at %s\n", intrstr);
   5441 		}
   5442 		sc->sc_ihs[intr_idx] = vih;
   5443 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5444 			goto fail;
   5445 		txrx_established++;
   5446 		intr_idx++;
   5447 	}
   5448 
   5449 	/*
   5450 	 * LINK
   5451 	 */
   5452 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5453 	    sizeof(intrbuf));
   5454 #ifdef WM_MPSAFE
   5455 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5456 #endif
   5457 	memset(intr_xname, 0, sizeof(intr_xname));
   5458 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5459 	    device_xname(sc->sc_dev));
   5460 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5461 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5462 	if (vih == NULL) {
   5463 		aprint_error_dev(sc->sc_dev,
   5464 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5465 		    intrstr ? " at " : "",
   5466 		    intrstr ? intrstr : "");
   5467 
   5468 		goto fail;
   5469 	}
    5470 	/* Keep the default affinity for the LINK interrupt */
   5471 	aprint_normal_dev(sc->sc_dev,
   5472 	    "for LINK interrupting at %s\n", intrstr);
   5473 	sc->sc_ihs[intr_idx] = vih;
   5474 	sc->sc_link_intr_idx = intr_idx;
   5475 
   5476 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5477 	kcpuset_destroy(affinity);
   5478 	return 0;
   5479 
   5480  fail:
   5481 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5482 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5483 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5484 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5485 	}
   5486 
   5487 	kcpuset_destroy(affinity);
   5488 	return ENOMEM;
   5489 }
   5490 
   5491 static void
   5492 wm_unset_stopping_flags(struct wm_softc *sc)
   5493 {
   5494 	int i;
   5495 
   5496 	KASSERT(WM_CORE_LOCKED(sc));
   5497 
   5498 	/*
   5499 	 * must unset stopping flags in ascending order.
   5500 	 */
   5501 	for (i = 0; i < sc->sc_nqueues; i++) {
   5502 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5503 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5504 
   5505 		mutex_enter(txq->txq_lock);
   5506 		txq->txq_stopping = false;
   5507 		mutex_exit(txq->txq_lock);
   5508 
   5509 		mutex_enter(rxq->rxq_lock);
   5510 		rxq->rxq_stopping = false;
   5511 		mutex_exit(rxq->rxq_lock);
   5512 	}
   5513 
   5514 	sc->sc_core_stopping = false;
   5515 }
   5516 
   5517 static void
   5518 wm_set_stopping_flags(struct wm_softc *sc)
   5519 {
   5520 	int i;
   5521 
   5522 	KASSERT(WM_CORE_LOCKED(sc));
   5523 
   5524 	sc->sc_core_stopping = true;
   5525 
   5526 	/*
   5527 	 * must set stopping flags in ascending order.
   5528 	 */
   5529 	for (i = 0; i < sc->sc_nqueues; i++) {
   5530 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5531 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5532 
   5533 		mutex_enter(rxq->rxq_lock);
   5534 		rxq->rxq_stopping = true;
   5535 		mutex_exit(rxq->rxq_lock);
   5536 
   5537 		mutex_enter(txq->txq_lock);
   5538 		txq->txq_stopping = true;
   5539 		mutex_exit(txq->txq_lock);
   5540 	}
   5541 }
   5542 
   5543 /*
    5544  * Write the interrupt interval value to the ITR or EITR register.
   5545  */
   5546 static void
   5547 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5548 {
   5549 
   5550 	if (!wmq->wmq_set_itr)
   5551 		return;
   5552 
   5553 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5554 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5555 
   5556 		/*
   5557 		 * 82575 doesn't have CNT_INGR field.
    5558 		 * So, overwrite the counter field in software.
   5559 		 */
   5560 		if (sc->sc_type == WM_T_82575)
   5561 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5562 		else
   5563 			eitr |= EITR_CNT_INGR;
   5564 
   5565 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5566 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5567 		/*
   5568 		 * 82574 has both ITR and EITR. SET EITR when we use
   5569 		 * the multi queue function with MSI-X.
   5570 		 */
   5571 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5572 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5573 	} else {
   5574 		KASSERT(wmq->wmq_id == 0);
   5575 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5576 	}
   5577 
   5578 	wmq->wmq_set_itr = false;
   5579 }
   5580 
   5581 /*
   5582  * TODO
    5583  * The dynamic calculation of ITR below is almost the same as in Linux
    5584  * igb, but it does not fit wm(4) well. So, AIM stays disabled until we
    5585  * find an appropriate ITR calculation.
   5586  */
   5587 /*
    5588  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5589  * write. This function does not write the ITR/EITR register itself.
   5590  */
   5591 static void
   5592 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5593 {
   5594 #ifdef NOTYET
   5595 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5596 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5597 	uint32_t avg_size = 0;
   5598 	uint32_t new_itr;
   5599 
   5600 	if (rxq->rxq_packets)
   5601 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5602 	if (txq->txq_packets)
   5603 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5604 
   5605 	if (avg_size == 0) {
   5606 		new_itr = 450; /* restore default value */
   5607 		goto out;
   5608 	}
   5609 
   5610 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5611 	avg_size += 24;
   5612 
   5613 	/* Don't starve jumbo frames */
   5614 	avg_size = uimin(avg_size, 3000);
   5615 
   5616 	/* Give a little boost to mid-size frames */
   5617 	if ((avg_size > 300) && (avg_size < 1200))
   5618 		new_itr = avg_size / 3;
   5619 	else
   5620 		new_itr = avg_size / 2;
   5621 
   5622 out:
   5623 	/*
    5624 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5625 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5626 	 */
   5627 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5628 		new_itr *= 4;
   5629 
   5630 	if (new_itr != wmq->wmq_itr) {
   5631 		wmq->wmq_itr = new_itr;
   5632 		wmq->wmq_set_itr = true;
   5633 	} else
   5634 		wmq->wmq_set_itr = false;
   5635 
   5636 	rxq->rxq_packets = 0;
   5637 	rxq->rxq_bytes = 0;
   5638 	txq->txq_packets = 0;
   5639 	txq->txq_bytes = 0;
   5640 #endif
   5641 }
   5642 
   5643 /*
   5644  * wm_init:		[ifnet interface function]
   5645  *
   5646  *	Initialize the interface.
   5647  */
   5648 static int
   5649 wm_init(struct ifnet *ifp)
   5650 {
   5651 	struct wm_softc *sc = ifp->if_softc;
   5652 	int ret;
   5653 
   5654 	WM_CORE_LOCK(sc);
   5655 	ret = wm_init_locked(ifp);
   5656 	WM_CORE_UNLOCK(sc);
   5657 
   5658 	return ret;
   5659 }
   5660 
   5661 static int
   5662 wm_init_locked(struct ifnet *ifp)
   5663 {
   5664 	struct wm_softc *sc = ifp->if_softc;
   5665 	struct ethercom *ec = &sc->sc_ethercom;
   5666 	int i, j, trynum, error = 0;
   5667 	uint32_t reg;
   5668 
   5669 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5670 		device_xname(sc->sc_dev), __func__));
   5671 	KASSERT(WM_CORE_LOCKED(sc));
   5672 
   5673 	/*
    5674 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5675 	 * There is a small but measurable benefit to avoiding the adjustment
   5676 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5677 	 * on such platforms.  One possibility is that the DMA itself is
   5678 	 * slightly more efficient if the front of the entire packet (instead
   5679 	 * of the front of the headers) is aligned.
   5680 	 *
   5681 	 * Note we must always set align_tweak to 0 if we are using
   5682 	 * jumbo frames.
   5683 	 */
   5684 #ifdef __NO_STRICT_ALIGNMENT
   5685 	sc->sc_align_tweak = 0;
   5686 #else
   5687 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5688 		sc->sc_align_tweak = 0;
   5689 	else
   5690 		sc->sc_align_tweak = 2;
   5691 #endif /* __NO_STRICT_ALIGNMENT */
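         	/*
         	 * With the 2-byte tweak, the 14-byte Ethernet header ends on
         	 * a 4-byte boundary, so the IP header that follows is aligned.
         	 */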
   5692 
   5693 	/* Cancel any pending I/O. */
   5694 	wm_stop_locked(ifp, 0);
   5695 
   5696 	/* update statistics before reset */
   5697 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5698 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5699 
   5700 	/* PCH_SPT hardware workaround */
   5701 	if (sc->sc_type == WM_T_PCH_SPT)
   5702 		wm_flush_desc_rings(sc);
   5703 
   5704 	/* Reset the chip to a known state. */
   5705 	wm_reset(sc);
   5706 
   5707 	/*
   5708 	 * AMT based hardware can now take control from firmware
   5709 	 * Do this after reset.
   5710 	 */
   5711 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5712 		wm_get_hw_control(sc);
   5713 
   5714 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5715 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5716 		wm_legacy_irq_quirk_spt(sc);
   5717 
   5718 	/* Init hardware bits */
   5719 	wm_initialize_hardware_bits(sc);
   5720 
   5721 	/* Reset the PHY. */
   5722 	if (sc->sc_flags & WM_F_HAS_MII)
   5723 		wm_gmii_reset(sc);
   5724 
   5725 	if (sc->sc_type >= WM_T_ICH8) {
   5726 		reg = CSR_READ(sc, WMREG_GCR);
   5727 		/*
   5728 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5729 		 * default after reset.
   5730 		 */
   5731 		if (sc->sc_type == WM_T_ICH8)
   5732 			reg |= GCR_NO_SNOOP_ALL;
   5733 		else
   5734 			reg &= ~GCR_NO_SNOOP_ALL;
   5735 		CSR_WRITE(sc, WMREG_GCR, reg);
   5736 	}
   5737 	if ((sc->sc_type >= WM_T_ICH8)
   5738 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5739 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5740 
   5741 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5742 		reg |= CTRL_EXT_RO_DIS;
   5743 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5744 	}
   5745 
   5746 	/* Calculate (E)ITR value */
   5747 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5748 		/*
   5749 		 * For NEWQUEUE's EITR (except for 82575).
    5750 		 * The 82575's EITR should be set to the same throttling
    5751 		 * value as the other old controllers' ITR because the
    5752 		 * interrupt/sec calculation is the same, i.e.
    5753 		 * 1,000,000,000 / (N * 256).
    5754 		 *
    5755 		 * The 82574's EITR should be set to the same throttling
    5756 		 * value as its ITR. For N interrupts/sec, set this value to
    5757 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5758 		 */
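         		/* 450 corresponds to roughly 2222 interrupts/sec. */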
   5759 		sc->sc_itr_init = 450;
   5760 	} else if (sc->sc_type >= WM_T_82543) {
   5761 		/*
   5762 		 * Set up the interrupt throttling register (units of 256ns)
   5763 		 * Note that a footnote in Intel's documentation says this
   5764 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5765 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5766 		 * that that is also true for the 1024ns units of the other
   5767 		 * interrupt-related timer registers -- so, really, we ought
   5768 		 * to divide this value by 4 when the link speed is low.
   5769 		 *
   5770 		 * XXX implement this division at link speed change!
   5771 		 */
   5772 
   5773 		/*
   5774 		 * For N interrupts/sec, set this value to:
   5775 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5776 		 * absolute and packet timer values to this value
   5777 		 * divided by 4 to get "simple timer" behavior.
   5778 		 */
   5779 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5780 	}
   5781 
   5782 	error = wm_init_txrx_queues(sc);
   5783 	if (error)
   5784 		goto out;
   5785 
   5786 	/*
   5787 	 * Clear out the VLAN table -- we don't use it (yet).
   5788 	 */
   5789 	CSR_WRITE(sc, WMREG_VET, 0);
   5790 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5791 		trynum = 10; /* Due to hw errata */
   5792 	else
   5793 		trynum = 1;
   5794 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5795 		for (j = 0; j < trynum; j++)
   5796 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5797 
   5798 	/*
   5799 	 * Set up flow-control parameters.
   5800 	 *
   5801 	 * XXX Values could probably stand some tuning.
   5802 	 */
   5803 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5804 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5805 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5806 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5807 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5808 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5809 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5810 	}
   5811 
   5812 	sc->sc_fcrtl = FCRTL_DFLT;
   5813 	if (sc->sc_type < WM_T_82543) {
   5814 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5815 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5816 	} else {
   5817 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5818 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5819 	}
   5820 
   5821 	if (sc->sc_type == WM_T_80003)
   5822 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5823 	else
   5824 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5825 
   5826 	/* Writes the control register. */
   5827 	wm_set_vlan(sc);
   5828 
   5829 	if (sc->sc_flags & WM_F_HAS_MII) {
   5830 		uint16_t kmreg;
   5831 
   5832 		switch (sc->sc_type) {
   5833 		case WM_T_80003:
   5834 		case WM_T_ICH8:
   5835 		case WM_T_ICH9:
   5836 		case WM_T_ICH10:
   5837 		case WM_T_PCH:
   5838 		case WM_T_PCH2:
   5839 		case WM_T_PCH_LPT:
   5840 		case WM_T_PCH_SPT:
   5841 		case WM_T_PCH_CNP:
   5842 			/*
   5843 			 * Set the mac to wait the maximum time between each
   5844 			 * iteration and increase the max iterations when
   5845 			 * polling the phy; this fixes erroneous timeouts at
   5846 			 * 10Mbps.
   5847 			 */
   5848 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5849 			    0xFFFF);
   5850 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5851 			    &kmreg);
   5852 			kmreg |= 0x3F;
   5853 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5854 			    kmreg);
   5855 			break;
   5856 		default:
   5857 			break;
   5858 		}
   5859 
   5860 		if (sc->sc_type == WM_T_80003) {
   5861 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5862 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5863 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5864 
   5865 			/* Bypass RX and TX FIFO's */
   5866 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5867 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5868 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5869 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5870 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5871 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5872 		}
   5873 	}
   5874 #if 0
   5875 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5876 #endif
   5877 
   5878 	/* Set up checksum offload parameters. */
   5879 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5880 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5881 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5882 		reg |= RXCSUM_IPOFL;
   5883 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5884 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5885 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5886 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5887 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5888 
    5889 	/* Set up MSI-X related registers */
   5890 	if (wm_is_using_msix(sc)) {
   5891 		uint32_t ivar;
   5892 		struct wm_queue *wmq;
   5893 		int qid, qintr_idx;
   5894 
   5895 		if (sc->sc_type == WM_T_82575) {
   5896 			/* Interrupt control */
   5897 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5898 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5899 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5900 
   5901 			/* TX and RX */
   5902 			for (i = 0; i < sc->sc_nqueues; i++) {
   5903 				wmq = &sc->sc_queue[i];
   5904 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5905 				    EITR_TX_QUEUE(wmq->wmq_id)
   5906 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5907 			}
   5908 			/* Link status */
   5909 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5910 			    EITR_OTHER);
   5911 		} else if (sc->sc_type == WM_T_82574) {
   5912 			/* Interrupt control */
   5913 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5914 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5915 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5916 
   5917 			/*
    5918 			 * Work around an issue with spurious interrupts
    5919 			 * in MSI-X mode.
    5920 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5921 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5922 			 */
   5923 			reg = CSR_READ(sc, WMREG_RFCTL);
   5924 			reg |= WMREG_RFCTL_ACKDIS;
   5925 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5926 
   5927 			ivar = 0;
   5928 			/* TX and RX */
   5929 			for (i = 0; i < sc->sc_nqueues; i++) {
   5930 				wmq = &sc->sc_queue[i];
   5931 				qid = wmq->wmq_id;
   5932 				qintr_idx = wmq->wmq_intr_idx;
   5933 
   5934 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5935 				    IVAR_TX_MASK_Q_82574(qid));
   5936 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5937 				    IVAR_RX_MASK_Q_82574(qid));
   5938 			}
   5939 			/* Link status */
   5940 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5941 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5942 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5943 		} else {
   5944 			/* Interrupt control */
   5945 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5946 			    | GPIE_EIAME | GPIE_PBA);
   5947 
   5948 			switch (sc->sc_type) {
   5949 			case WM_T_82580:
   5950 			case WM_T_I350:
   5951 			case WM_T_I354:
   5952 			case WM_T_I210:
   5953 			case WM_T_I211:
   5954 				/* TX and RX */
   5955 				for (i = 0; i < sc->sc_nqueues; i++) {
   5956 					wmq = &sc->sc_queue[i];
   5957 					qid = wmq->wmq_id;
   5958 					qintr_idx = wmq->wmq_intr_idx;
   5959 
   5960 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5961 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5962 					ivar |= __SHIFTIN((qintr_idx
   5963 						| IVAR_VALID),
   5964 					    IVAR_TX_MASK_Q(qid));
   5965 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5966 					ivar |= __SHIFTIN((qintr_idx
   5967 						| IVAR_VALID),
   5968 					    IVAR_RX_MASK_Q(qid));
   5969 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5970 				}
   5971 				break;
   5972 			case WM_T_82576:
   5973 				/* TX and RX */
   5974 				for (i = 0; i < sc->sc_nqueues; i++) {
   5975 					wmq = &sc->sc_queue[i];
   5976 					qid = wmq->wmq_id;
   5977 					qintr_idx = wmq->wmq_intr_idx;
   5978 
   5979 					ivar = CSR_READ(sc,
   5980 					    WMREG_IVAR_Q_82576(qid));
   5981 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5982 					ivar |= __SHIFTIN((qintr_idx
   5983 						| IVAR_VALID),
   5984 					    IVAR_TX_MASK_Q_82576(qid));
   5985 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5986 					ivar |= __SHIFTIN((qintr_idx
   5987 						| IVAR_VALID),
   5988 					    IVAR_RX_MASK_Q_82576(qid));
   5989 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5990 					    ivar);
   5991 				}
   5992 				break;
   5993 			default:
   5994 				break;
   5995 			}
   5996 
   5997 			/* Link status */
   5998 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5999 			    IVAR_MISC_OTHER);
   6000 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   6001 		}
   6002 
   6003 		if (wm_is_using_multiqueue(sc)) {
   6004 			wm_init_rss(sc);
   6005 
   6006 			/*
    6007 			 * NOTE: Receive Full-Packet Checksum Offload
    6008 			 * is mutually exclusive with Multiqueue. However,
    6009 			 * this is not the same as TCP/IP checksums, which
    6010 			 * still work.
    6011 			 */
   6012 			reg = CSR_READ(sc, WMREG_RXCSUM);
   6013 			reg |= RXCSUM_PCSD;
   6014 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   6015 		}
   6016 	}
   6017 
   6018 	/* Set up the interrupt registers. */
   6019 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6020 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   6021 	    ICR_RXO | ICR_RXT0;
   6022 	if (wm_is_using_msix(sc)) {
   6023 		uint32_t mask;
   6024 		struct wm_queue *wmq;
   6025 
   6026 		switch (sc->sc_type) {
   6027 		case WM_T_82574:
   6028 			mask = 0;
   6029 			for (i = 0; i < sc->sc_nqueues; i++) {
   6030 				wmq = &sc->sc_queue[i];
   6031 				mask |= ICR_TXQ(wmq->wmq_id);
   6032 				mask |= ICR_RXQ(wmq->wmq_id);
   6033 			}
   6034 			mask |= ICR_OTHER;
   6035 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   6036 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   6037 			break;
   6038 		default:
   6039 			if (sc->sc_type == WM_T_82575) {
   6040 				mask = 0;
   6041 				for (i = 0; i < sc->sc_nqueues; i++) {
   6042 					wmq = &sc->sc_queue[i];
   6043 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   6044 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   6045 				}
   6046 				mask |= EITR_OTHER;
   6047 			} else {
   6048 				mask = 0;
   6049 				for (i = 0; i < sc->sc_nqueues; i++) {
   6050 					wmq = &sc->sc_queue[i];
   6051 					mask |= 1 << wmq->wmq_intr_idx;
   6052 				}
   6053 				mask |= 1 << sc->sc_link_intr_idx;
   6054 			}
   6055 			CSR_WRITE(sc, WMREG_EIAC, mask);
   6056 			CSR_WRITE(sc, WMREG_EIAM, mask);
   6057 			CSR_WRITE(sc, WMREG_EIMS, mask);
   6058 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   6059 			break;
   6060 		}
   6061 	} else
   6062 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   6063 
   6064 	/* Set up the inter-packet gap. */
   6065 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   6066 
   6067 	if (sc->sc_type >= WM_T_82543) {
   6068 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6069 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   6070 			wm_itrs_writereg(sc, wmq);
   6071 		}
   6072 		/*
    6073 		 * Link interrupts occur much less frequently than TX
    6074 		 * and RX interrupts. So, we don't tune the
    6075 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    6076 		 * if_igb does.
   6077 		 */
   6078 	}
   6079 
   6080 	/* Set the VLAN ethernetype. */
   6081 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   6082 
   6083 	/*
   6084 	 * Set up the transmit control register; we start out with
    6085 	 * a collision distance suitable for FDX, but update it when
   6086 	 * we resolve the media type.
   6087 	 */
   6088 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   6089 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   6090 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   6091 	if (sc->sc_type >= WM_T_82571)
   6092 		sc->sc_tctl |= TCTL_MULR;
   6093 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   6094 
   6095 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    6096 		/* Write TDT after TCTL.EN is set. See the documentation. */
   6097 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   6098 	}
   6099 
   6100 	if (sc->sc_type == WM_T_80003) {
   6101 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   6102 		reg &= ~TCTL_EXT_GCEX_MASK;
   6103 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   6104 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   6105 	}
   6106 
   6107 	/* Set the media. */
   6108 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   6109 		goto out;
   6110 
   6111 	/* Configure for OS presence */
   6112 	wm_init_manageability(sc);
   6113 
   6114 	/*
   6115 	 * Set up the receive control register; we actually program the
   6116 	 * register when we set the receive filter. Use multicast address
   6117 	 * offset type 0.
   6118 	 *
   6119 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   6120 	 * don't enable that feature.
   6121 	 */
   6122 	sc->sc_mchash_type = 0;
   6123 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   6124 	    | __SHIFTIN(sc->sc_mchash_type, RCTL_MO);
   6125 
   6126 	/*
    6127 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   6128 	 */
   6129 	if (sc->sc_type == WM_T_82574)
   6130 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   6131 
   6132 	/*
   6133 	 * The I350 has a bug where it always strips the CRC whether
    6134 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   6135 	 */
   6136 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   6137 	    || (sc->sc_type == WM_T_I210))
   6138 		sc->sc_rctl |= RCTL_SECRC;
   6139 
   6140 	if (((ec->ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   6141 	    && (ifp->if_mtu > ETHERMTU)) {
   6142 		sc->sc_rctl |= RCTL_LPE;
   6143 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6144 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6145 	}
   6146 
   6147 	if (MCLBYTES == 2048)
   6148 		sc->sc_rctl |= RCTL_2k;
   6149 	else {
   6150 		if (sc->sc_type >= WM_T_82543) {
   6151 			switch (MCLBYTES) {
   6152 			case 4096:
   6153 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6154 				break;
   6155 			case 8192:
   6156 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6157 				break;
   6158 			case 16384:
   6159 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6160 				break;
   6161 			default:
   6162 				panic("wm_init: MCLBYTES %d unsupported",
   6163 				    MCLBYTES);
   6164 				break;
   6165 			}
   6166 		} else
   6167 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6168 	}
   6169 
   6170 	/* Enable ECC */
   6171 	switch (sc->sc_type) {
   6172 	case WM_T_82571:
   6173 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6174 		reg |= PBA_ECC_CORR_EN;
   6175 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6176 		break;
   6177 	case WM_T_PCH_LPT:
   6178 	case WM_T_PCH_SPT:
   6179 	case WM_T_PCH_CNP:
   6180 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6181 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6182 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6183 
   6184 		sc->sc_ctrl |= CTRL_MEHE;
   6185 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6186 		break;
   6187 	default:
   6188 		break;
   6189 	}
   6190 
   6191 	/*
   6192 	 * Set the receive filter.
   6193 	 *
   6194 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6195 	 * the setting of RCTL.EN in wm_set_filter()
   6196 	 */
   6197 	wm_set_filter(sc);
   6198 
   6199 	/* On 575 and later set RDT only if RX enabled */
   6200 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6201 		int qidx;
   6202 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6203 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6204 			for (i = 0; i < WM_NRXDESC; i++) {
   6205 				mutex_enter(rxq->rxq_lock);
   6206 				wm_init_rxdesc(rxq, i);
   6207 				mutex_exit(rxq->rxq_lock);
   6208 
   6209 			}
   6210 		}
   6211 	}
   6212 
   6213 	wm_unset_stopping_flags(sc);
   6214 
   6215 	/* Start the one second link check clock. */
   6216 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6217 
   6218 	/* ...all done! */
   6219 	ifp->if_flags |= IFF_RUNNING;
   6220 	ifp->if_flags &= ~IFF_OACTIVE;
   6221 
   6222  out:
   6223 	/* Save last flags for the callback */
   6224 	sc->sc_if_flags = ifp->if_flags;
   6225 	sc->sc_ec_capenable = ec->ec_capenable;
   6226 	if (error)
   6227 		log(LOG_ERR, "%s: interface not running\n",
   6228 		    device_xname(sc->sc_dev));
   6229 	return error;
   6230 }
   6231 
   6232 /*
   6233  * wm_stop:		[ifnet interface function]
   6234  *
   6235  *	Stop transmission on the interface.
   6236  */
   6237 static void
   6238 wm_stop(struct ifnet *ifp, int disable)
   6239 {
   6240 	struct wm_softc *sc = ifp->if_softc;
   6241 
   6242 	WM_CORE_LOCK(sc);
   6243 	wm_stop_locked(ifp, disable);
   6244 	WM_CORE_UNLOCK(sc);
   6245 }
   6246 
   6247 static void
   6248 wm_stop_locked(struct ifnet *ifp, int disable)
   6249 {
   6250 	struct wm_softc *sc = ifp->if_softc;
   6251 	struct wm_txsoft *txs;
   6252 	int i, qidx;
   6253 
   6254 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6255 		device_xname(sc->sc_dev), __func__));
   6256 	KASSERT(WM_CORE_LOCKED(sc));
   6257 
   6258 	wm_set_stopping_flags(sc);
   6259 
   6260 	/* Stop the one second clock. */
   6261 	callout_stop(&sc->sc_tick_ch);
   6262 
   6263 	/* Stop the 82547 Tx FIFO stall check timer. */
   6264 	if (sc->sc_type == WM_T_82547)
   6265 		callout_stop(&sc->sc_txfifo_ch);
   6266 
   6267 	if (sc->sc_flags & WM_F_HAS_MII) {
   6268 		/* Down the MII. */
   6269 		mii_down(&sc->sc_mii);
   6270 	} else {
   6271 #if 0
   6272 		/* Should we clear PHY's status properly? */
   6273 		wm_reset(sc);
   6274 #endif
   6275 	}
   6276 
   6277 	/* Stop the transmit and receive processes. */
   6278 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6279 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6280 	sc->sc_rctl &= ~RCTL_EN;
   6281 
   6282 	/*
   6283 	 * Clear the interrupt mask to ensure the device cannot assert its
   6284 	 * interrupt line.
   6285 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6286 	 * service any currently pending or shared interrupt.
   6287 	 */
   6288 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6289 	sc->sc_icr = 0;
   6290 	if (wm_is_using_msix(sc)) {
   6291 		if (sc->sc_type != WM_T_82574) {
   6292 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6293 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6294 		} else
   6295 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6296 	}
   6297 
   6298 	/* Release any queued transmit buffers. */
   6299 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6300 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6301 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6302 		mutex_enter(txq->txq_lock);
   6303 		txq->txq_sending = false; /* ensure watchdog disabled */
   6304 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6305 			txs = &txq->txq_soft[i];
   6306 			if (txs->txs_mbuf != NULL) {
    6307 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6308 				m_freem(txs->txs_mbuf);
   6309 				txs->txs_mbuf = NULL;
   6310 			}
   6311 		}
   6312 		mutex_exit(txq->txq_lock);
   6313 	}
   6314 
   6315 	/* Mark the interface as down and cancel the watchdog timer. */
   6316 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6317 
   6318 	if (disable) {
   6319 		for (i = 0; i < sc->sc_nqueues; i++) {
   6320 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6321 			mutex_enter(rxq->rxq_lock);
   6322 			wm_rxdrain(rxq);
   6323 			mutex_exit(rxq->rxq_lock);
   6324 		}
   6325 	}
   6326 
   6327 #if 0 /* notyet */
   6328 	if (sc->sc_type >= WM_T_82544)
   6329 		CSR_WRITE(sc, WMREG_WUC, 0);
   6330 #endif
   6331 }
   6332 
   6333 static void
   6334 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6335 {
   6336 	struct mbuf *m;
   6337 	int i;
   6338 
   6339 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6340 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6341 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6342 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6343 		    m->m_data, m->m_len, m->m_flags);
   6344 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6345 	    i, i == 1 ? "" : "s");
   6346 }
   6347 
   6348 /*
   6349  * wm_82547_txfifo_stall:
   6350  *
   6351  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6352  *	reset the FIFO pointers, and restart packet transmission.
   6353  */
   6354 static void
   6355 wm_82547_txfifo_stall(void *arg)
   6356 {
   6357 	struct wm_softc *sc = arg;
   6358 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6359 
   6360 	mutex_enter(txq->txq_lock);
   6361 
   6362 	if (txq->txq_stopping)
   6363 		goto out;
   6364 
   6365 	if (txq->txq_fifo_stall) {
   6366 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6367 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6368 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6369 			/*
   6370 			 * Packets have drained.  Stop transmitter, reset
   6371 			 * FIFO pointers, restart transmitter, and kick
   6372 			 * the packet queue.
   6373 			 */
   6374 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6375 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6376 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6377 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6378 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6379 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6380 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6381 			CSR_WRITE_FLUSH(sc);
   6382 
   6383 			txq->txq_fifo_head = 0;
   6384 			txq->txq_fifo_stall = 0;
   6385 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6386 		} else {
   6387 			/*
   6388 			 * Still waiting for packets to drain; try again in
   6389 			 * another tick.
   6390 			 */
   6391 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6392 		}
   6393 	}
   6394 
   6395 out:
   6396 	mutex_exit(txq->txq_lock);
   6397 }
   6398 
   6399 /*
   6400  * wm_82547_txfifo_bugchk:
   6401  *
   6402  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6403  *	prevent enqueueing a packet that would wrap around the end
    6404  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6405  *
   6406  *	We do this by checking the amount of space before the end
   6407  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6408  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6409  *	the internal FIFO pointers to the beginning, and restart
   6410  *	transmission on the interface.
   6411  */
   6412 #define	WM_FIFO_HDR		0x10
   6413 #define	WM_82547_PAD_LEN	0x3e0
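/*
 * Worked example (hypothetical numbers; the real FIFO size comes from
 * txq->txq_fifo_size, set at attach time): with a fifo_size of 8192
 * and fifo_head at 7800, space = 392.  A 1514-byte frame rounds up to
 * len = roundup(1514 + 0x10, 0x10) = 1536, and since
 * 1536 >= 0x3e0 + 392 (= 1384), the packet would wrap the FIFO, so we
 * stall and wait for it to drain.
 */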
   6414 static int
   6415 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6416 {
   6417 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6418 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6419 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6420 
   6421 	/* Just return if already stalled. */
   6422 	if (txq->txq_fifo_stall)
   6423 		return 1;
   6424 
   6425 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
    6426 		/* The stall only occurs in half-duplex mode; skip the check. */
   6427 		goto send_packet;
   6428 	}
   6429 
   6430 	if (len >= WM_82547_PAD_LEN + space) {
   6431 		txq->txq_fifo_stall = 1;
   6432 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6433 		return 1;
   6434 	}
   6435 
   6436  send_packet:
   6437 	txq->txq_fifo_head += len;
   6438 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6439 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6440 
   6441 	return 0;
   6442 }
   6443 
   6444 static int
   6445 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6446 {
   6447 	int error;
   6448 
   6449 	/*
   6450 	 * Allocate the control data structures, and create and load the
   6451 	 * DMA map for it.
   6452 	 *
   6453 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6454 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6455 	 * both sets within the same 4G segment.
   6456 	 */
   6457 	if (sc->sc_type < WM_T_82544)
   6458 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6459 	else
   6460 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6461 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6462 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6463 	else
   6464 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6465 
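	/*
	 * The 4G constraint is enforced by the boundary argument of
	 * bus_dmamem_alloc() below: passing 0x100000000ULL guarantees the
	 * allocation never crosses a 4GB boundary, so every descriptor in
	 * the ring shares the same upper 32 address bits.
	 */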
   6466 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6467 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6468 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6469 		aprint_error_dev(sc->sc_dev,
   6470 		    "unable to allocate TX control data, error = %d\n",
   6471 		    error);
   6472 		goto fail_0;
   6473 	}
   6474 
   6475 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6476 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6477 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6478 		aprint_error_dev(sc->sc_dev,
   6479 		    "unable to map TX control data, error = %d\n", error);
   6480 		goto fail_1;
   6481 	}
   6482 
   6483 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6484 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6485 		aprint_error_dev(sc->sc_dev,
   6486 		    "unable to create TX control data DMA map, error = %d\n",
   6487 		    error);
   6488 		goto fail_2;
   6489 	}
   6490 
   6491 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6492 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6493 		aprint_error_dev(sc->sc_dev,
   6494 		    "unable to load TX control data DMA map, error = %d\n",
   6495 		    error);
   6496 		goto fail_3;
   6497 	}
   6498 
   6499 	return 0;
   6500 
   6501  fail_3:
   6502 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6503  fail_2:
   6504 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6505 	    WM_TXDESCS_SIZE(txq));
   6506  fail_1:
   6507 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6508  fail_0:
   6509 	return error;
   6510 }
   6511 
   6512 static void
   6513 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6514 {
   6515 
   6516 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6517 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6518 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6519 	    WM_TXDESCS_SIZE(txq));
   6520 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6521 }
   6522 
   6523 static int
   6524 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6525 {
   6526 	int error;
   6527 	size_t rxq_descs_size;
   6528 
   6529 	/*
   6530 	 * Allocate the control data structures, and create and load the
   6531 	 * DMA map for it.
   6532 	 *
   6533 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6534 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6535 	 * both sets within the same 4G segment.
   6536 	 */
   6537 	rxq->rxq_ndesc = WM_NRXDESC;
   6538 	if (sc->sc_type == WM_T_82574)
   6539 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6540 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6541 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6542 	else
   6543 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6544 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6545 
   6546 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6547 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6548 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6549 		aprint_error_dev(sc->sc_dev,
   6550 		    "unable to allocate RX control data, error = %d\n",
   6551 		    error);
   6552 		goto fail_0;
   6553 	}
   6554 
   6555 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6556 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6557 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6558 		aprint_error_dev(sc->sc_dev,
   6559 		    "unable to map RX control data, error = %d\n", error);
   6560 		goto fail_1;
   6561 	}
   6562 
   6563 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6564 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6565 		aprint_error_dev(sc->sc_dev,
   6566 		    "unable to create RX control data DMA map, error = %d\n",
   6567 		    error);
   6568 		goto fail_2;
   6569 	}
   6570 
   6571 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6572 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6573 		aprint_error_dev(sc->sc_dev,
   6574 		    "unable to load RX control data DMA map, error = %d\n",
   6575 		    error);
   6576 		goto fail_3;
   6577 	}
   6578 
   6579 	return 0;
   6580 
   6581  fail_3:
   6582 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6583  fail_2:
   6584 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6585 	    rxq_descs_size);
   6586  fail_1:
   6587 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6588  fail_0:
   6589 	return error;
   6590 }
   6591 
   6592 static void
   6593 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6594 {
   6595 
   6596 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6597 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6598 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6599 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6600 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6601 }
   6602 
   6603 
   6604 static int
   6605 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6606 {
   6607 	int i, error;
   6608 
   6609 	/* Create the transmit buffer DMA maps. */
   6610 	WM_TXQUEUELEN(txq) =
   6611 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6612 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6613 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6614 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6615 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6616 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6617 			aprint_error_dev(sc->sc_dev,
   6618 			    "unable to create Tx DMA map %d, error = %d\n",
   6619 			    i, error);
   6620 			goto fail;
   6621 		}
   6622 	}
   6623 
   6624 	return 0;
   6625 
   6626  fail:
   6627 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6628 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6629 			bus_dmamap_destroy(sc->sc_dmat,
   6630 			    txq->txq_soft[i].txs_dmamap);
   6631 	}
   6632 	return error;
   6633 }
   6634 
   6635 static void
   6636 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6637 {
   6638 	int i;
   6639 
   6640 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6641 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6642 			bus_dmamap_destroy(sc->sc_dmat,
   6643 			    txq->txq_soft[i].txs_dmamap);
   6644 	}
   6645 }
   6646 
   6647 static int
   6648 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6649 {
   6650 	int i, error;
   6651 
   6652 	/* Create the receive buffer DMA maps. */
   6653 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6654 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6655 			    MCLBYTES, 0, 0,
   6656 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6657 			aprint_error_dev(sc->sc_dev,
   6658 			    "unable to create Rx DMA map %d error = %d\n",
   6659 			    i, error);
   6660 			goto fail;
   6661 		}
   6662 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6663 	}
   6664 
   6665 	return 0;
   6666 
   6667  fail:
   6668 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6669 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6670 			bus_dmamap_destroy(sc->sc_dmat,
   6671 			    rxq->rxq_soft[i].rxs_dmamap);
   6672 	}
   6673 	return error;
   6674 }
   6675 
   6676 static void
   6677 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6678 {
   6679 	int i;
   6680 
   6681 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6682 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6683 			bus_dmamap_destroy(sc->sc_dmat,
   6684 			    rxq->rxq_soft[i].rxs_dmamap);
   6685 	}
   6686 }
   6687 
   6688 /*
    6689  * wm_alloc_txrx_queues:
    6690  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6691  */
   6692 static int
   6693 wm_alloc_txrx_queues(struct wm_softc *sc)
   6694 {
   6695 	int i, error, tx_done, rx_done;
   6696 
   6697 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6698 	    KM_SLEEP);
   6699 	if (sc->sc_queue == NULL) {
    6700 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6701 		error = ENOMEM;
   6702 		goto fail_0;
   6703 	}
   6704 
   6705 	/*
   6706 	 * For transmission
   6707 	 */
   6708 	error = 0;
   6709 	tx_done = 0;
   6710 	for (i = 0; i < sc->sc_nqueues; i++) {
   6711 #ifdef WM_EVENT_COUNTERS
   6712 		int j;
   6713 		const char *xname;
   6714 #endif
   6715 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6716 		txq->txq_sc = sc;
   6717 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6718 
   6719 		error = wm_alloc_tx_descs(sc, txq);
   6720 		if (error)
   6721 			break;
   6722 		error = wm_alloc_tx_buffer(sc, txq);
   6723 		if (error) {
   6724 			wm_free_tx_descs(sc, txq);
   6725 			break;
   6726 		}
   6727 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6728 		if (txq->txq_interq == NULL) {
   6729 			wm_free_tx_descs(sc, txq);
   6730 			wm_free_tx_buffer(sc, txq);
   6731 			error = ENOMEM;
   6732 			break;
   6733 		}
   6734 
   6735 #ifdef WM_EVENT_COUNTERS
   6736 		xname = device_xname(sc->sc_dev);
   6737 
   6738 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6739 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6740 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6741 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6742 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6743 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6744 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6745 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6746 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6747 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6748 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6749 
   6750 		for (j = 0; j < WM_NTXSEGS; j++) {
   6751 			snprintf(txq->txq_txseg_evcnt_names[j],
   6752 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6753 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6754 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6755 		}
   6756 
   6757 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6758 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6759 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6760 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6761 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6762 #endif /* WM_EVENT_COUNTERS */
   6763 
   6764 		tx_done++;
   6765 	}
   6766 	if (error)
   6767 		goto fail_1;
   6768 
   6769 	/*
    6770 	 * For receive
   6771 	 */
   6772 	error = 0;
   6773 	rx_done = 0;
   6774 	for (i = 0; i < sc->sc_nqueues; i++) {
   6775 #ifdef WM_EVENT_COUNTERS
   6776 		const char *xname;
   6777 #endif
   6778 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6779 		rxq->rxq_sc = sc;
   6780 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6781 
   6782 		error = wm_alloc_rx_descs(sc, rxq);
   6783 		if (error)
   6784 			break;
   6785 
   6786 		error = wm_alloc_rx_buffer(sc, rxq);
   6787 		if (error) {
   6788 			wm_free_rx_descs(sc, rxq);
   6789 			break;
   6790 		}
   6791 
   6792 #ifdef WM_EVENT_COUNTERS
   6793 		xname = device_xname(sc->sc_dev);
   6794 
   6795 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6796 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6797 
   6798 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6799 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6800 #endif /* WM_EVENT_COUNTERS */
   6801 
   6802 		rx_done++;
   6803 	}
   6804 	if (error)
   6805 		goto fail_2;
   6806 
   6807 	return 0;
   6808 
   6809  fail_2:
   6810 	for (i = 0; i < rx_done; i++) {
   6811 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6812 		wm_free_rx_buffer(sc, rxq);
   6813 		wm_free_rx_descs(sc, rxq);
   6814 		if (rxq->rxq_lock)
   6815 			mutex_obj_free(rxq->rxq_lock);
   6816 	}
   6817  fail_1:
   6818 	for (i = 0; i < tx_done; i++) {
   6819 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6820 		pcq_destroy(txq->txq_interq);
   6821 		wm_free_tx_buffer(sc, txq);
   6822 		wm_free_tx_descs(sc, txq);
   6823 		if (txq->txq_lock)
   6824 			mutex_obj_free(txq->txq_lock);
   6825 	}
   6826 
   6827 	kmem_free(sc->sc_queue,
   6828 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6829  fail_0:
   6830 	return error;
   6831 }
   6832 
   6833 /*
    6834  * wm_free_txrx_queues:
    6835  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6836  */
   6837 static void
   6838 wm_free_txrx_queues(struct wm_softc *sc)
   6839 {
   6840 	int i;
   6841 
   6842 	for (i = 0; i < sc->sc_nqueues; i++) {
   6843 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6844 
   6845 #ifdef WM_EVENT_COUNTERS
   6846 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6847 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6848 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6849 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6850 #endif /* WM_EVENT_COUNTERS */
   6851 
   6852 		wm_free_rx_buffer(sc, rxq);
   6853 		wm_free_rx_descs(sc, rxq);
   6854 		if (rxq->rxq_lock)
   6855 			mutex_obj_free(rxq->rxq_lock);
   6856 	}
   6857 
   6858 	for (i = 0; i < sc->sc_nqueues; i++) {
   6859 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6860 		struct mbuf *m;
   6861 #ifdef WM_EVENT_COUNTERS
   6862 		int j;
   6863 
   6864 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6865 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6866 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6867 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6868 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6869 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6870 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6871 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6872 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6873 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6874 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6875 
   6876 		for (j = 0; j < WM_NTXSEGS; j++)
   6877 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6878 
   6879 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6880 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6881 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6882 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6883 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6884 #endif /* WM_EVENT_COUNTERS */
   6885 
   6886 		/* drain txq_interq */
   6887 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6888 			m_freem(m);
   6889 		pcq_destroy(txq->txq_interq);
   6890 
   6891 		wm_free_tx_buffer(sc, txq);
   6892 		wm_free_tx_descs(sc, txq);
   6893 		if (txq->txq_lock)
   6894 			mutex_obj_free(txq->txq_lock);
   6895 	}
   6896 
   6897 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6898 }
   6899 
   6900 static void
   6901 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6902 {
   6903 
   6904 	KASSERT(mutex_owned(txq->txq_lock));
   6905 
   6906 	/* Initialize the transmit descriptor ring. */
   6907 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6908 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6909 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6910 	txq->txq_free = WM_NTXDESC(txq);
   6911 	txq->txq_next = 0;
   6912 }
   6913 
   6914 static void
   6915 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6916     struct wm_txqueue *txq)
   6917 {
   6918 
   6919 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6920 		device_xname(sc->sc_dev), __func__));
   6921 	KASSERT(mutex_owned(txq->txq_lock));
   6922 
   6923 	if (sc->sc_type < WM_T_82543) {
   6924 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6925 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6926 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6927 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6928 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6929 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6930 	} else {
   6931 		int qid = wmq->wmq_id;
   6932 
   6933 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6934 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6935 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6936 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6937 
   6938 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6939 			/*
   6940 			 * Don't write TDT before TCTL.EN is set.
    6941 			 * See the documentation.
   6942 			 */
   6943 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6944 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6945 			    | TXDCTL_WTHRESH(0));
   6946 		else {
   6947 			/* XXX should update with AIM? */
   6948 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6949 			if (sc->sc_type >= WM_T_82540) {
   6950 				/* should be same */
   6951 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6952 			}
   6953 
   6954 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6955 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6956 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6957 		}
   6958 	}
   6959 }
   6960 
   6961 static void
   6962 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6963 {
   6964 	int i;
   6965 
   6966 	KASSERT(mutex_owned(txq->txq_lock));
   6967 
   6968 	/* Initialize the transmit job descriptors. */
   6969 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6970 		txq->txq_soft[i].txs_mbuf = NULL;
   6971 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6972 	txq->txq_snext = 0;
   6973 	txq->txq_sdirty = 0;
   6974 }
   6975 
   6976 static void
   6977 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6978     struct wm_txqueue *txq)
   6979 {
   6980 
   6981 	KASSERT(mutex_owned(txq->txq_lock));
   6982 
   6983 	/*
   6984 	 * Set up some register offsets that are different between
   6985 	 * the i82542 and the i82543 and later chips.
   6986 	 */
   6987 	if (sc->sc_type < WM_T_82543)
   6988 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6989 	else
   6990 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6991 
   6992 	wm_init_tx_descs(sc, txq);
   6993 	wm_init_tx_regs(sc, wmq, txq);
   6994 	wm_init_tx_buffer(sc, txq);
   6995 
   6996 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6997 	txq->txq_sending = false;
   6998 }
   6999 
   7000 static void
   7001 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   7002     struct wm_rxqueue *rxq)
   7003 {
   7004 
   7005 	KASSERT(mutex_owned(rxq->rxq_lock));
   7006 
   7007 	/*
   7008 	 * Initialize the receive descriptor and receive job
   7009 	 * descriptor rings.
   7010 	 */
   7011 	if (sc->sc_type < WM_T_82543) {
   7012 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   7013 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   7014 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   7015 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7016 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   7017 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   7018 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   7019 
   7020 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   7021 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   7022 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   7023 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   7024 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   7025 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   7026 	} else {
   7027 		int qid = wmq->wmq_id;
   7028 
   7029 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   7030 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   7031 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   7032 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   7033 
   7034 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
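			/*
			 * SRRCTL expresses the Rx buffer size in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence the
			 * granularity check here and the byte-to-unit
			 * shift in the write below.
			 */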
   7035 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   7036 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   7037 
    7038 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   7039 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   7040 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   7041 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   7042 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   7043 			    | RXDCTL_WTHRESH(1));
   7044 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7045 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7046 		} else {
   7047 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   7048 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   7049 			/* XXX should update with AIM? */
   7050 			CSR_WRITE(sc, WMREG_RDTR,
   7051 			    (wmq->wmq_itr / 4) | RDTR_FPD);
   7052 			/* MUST be same */
   7053 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   7054 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   7055 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   7056 		}
   7057 	}
   7058 }
   7059 
   7060 static int
   7061 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   7062 {
   7063 	struct wm_rxsoft *rxs;
   7064 	int error, i;
   7065 
   7066 	KASSERT(mutex_owned(rxq->rxq_lock));
   7067 
   7068 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   7069 		rxs = &rxq->rxq_soft[i];
   7070 		if (rxs->rxs_mbuf == NULL) {
   7071 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   7072 				log(LOG_ERR, "%s: unable to allocate or map "
   7073 				    "rx buffer %d, error = %d\n",
   7074 				    device_xname(sc->sc_dev), i, error);
   7075 				/*
   7076 				 * XXX Should attempt to run with fewer receive
   7077 				 * XXX buffers instead of just failing.
   7078 				 */
   7079 				wm_rxdrain(rxq);
   7080 				return ENOMEM;
   7081 			}
   7082 		} else {
   7083 			/*
   7084 			 * For 82575 and 82576, the RX descriptors must be
   7085 			 * initialized after the setting of RCTL.EN in
   7086 			 * wm_set_filter()
   7087 			 */
   7088 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   7089 				wm_init_rxdesc(rxq, i);
   7090 		}
   7091 	}
   7092 	rxq->rxq_ptr = 0;
   7093 	rxq->rxq_discard = 0;
   7094 	WM_RXCHAIN_RESET(rxq);
   7095 
   7096 	return 0;
   7097 }
   7098 
   7099 static int
   7100 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   7101     struct wm_rxqueue *rxq)
   7102 {
   7103 
   7104 	KASSERT(mutex_owned(rxq->rxq_lock));
   7105 
   7106 	/*
   7107 	 * Set up some register offsets that are different between
   7108 	 * the i82542 and the i82543 and later chips.
   7109 	 */
   7110 	if (sc->sc_type < WM_T_82543)
   7111 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   7112 	else
   7113 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   7114 
   7115 	wm_init_rx_regs(sc, wmq, rxq);
   7116 	return wm_init_rx_buffer(sc, rxq);
   7117 }
   7118 
   7119 /*
    7120  * wm_init_txrx_queues:
    7121  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   7122  */
   7123 static int
   7124 wm_init_txrx_queues(struct wm_softc *sc)
   7125 {
   7126 	int i, error = 0;
   7127 
   7128 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7129 		device_xname(sc->sc_dev), __func__));
   7130 
   7131 	for (i = 0; i < sc->sc_nqueues; i++) {
   7132 		struct wm_queue *wmq = &sc->sc_queue[i];
   7133 		struct wm_txqueue *txq = &wmq->wmq_txq;
   7134 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7135 
   7136 		/*
   7137 		 * TODO
    7138 		 * Currently, a constant value is used instead of AIM.
    7139 		 * Furthermore, the interrupt interval for multiqueue
    7140 		 * operation, which uses polling mode, is set below the
    7141 		 * default value.  More tuning and AIM are required.
   7142 		 */
   7143 		if (wm_is_using_multiqueue(sc))
   7144 			wmq->wmq_itr = 50;
   7145 		else
   7146 			wmq->wmq_itr = sc->sc_itr_init;
   7147 		wmq->wmq_set_itr = true;
   7148 
   7149 		mutex_enter(txq->txq_lock);
   7150 		wm_init_tx_queue(sc, wmq, txq);
   7151 		mutex_exit(txq->txq_lock);
   7152 
   7153 		mutex_enter(rxq->rxq_lock);
   7154 		error = wm_init_rx_queue(sc, wmq, rxq);
   7155 		mutex_exit(rxq->rxq_lock);
   7156 		if (error)
   7157 			break;
   7158 	}
   7159 
   7160 	return error;
   7161 }
   7162 
   7163 /*
   7164  * wm_tx_offload:
   7165  *
   7166  *	Set up TCP/IP checksumming parameters for the
   7167  *	specified packet.
   7168  */
   7169 static int
   7170 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7171     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7172 {
   7173 	struct mbuf *m0 = txs->txs_mbuf;
   7174 	struct livengood_tcpip_ctxdesc *t;
   7175 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7176 	uint32_t ipcse;
   7177 	struct ether_header *eh;
   7178 	int offset, iphl;
   7179 	uint8_t fields;
   7180 
   7181 	/*
   7182 	 * XXX It would be nice if the mbuf pkthdr had offset
   7183 	 * fields for the protocol headers.
   7184 	 */
   7185 
   7186 	eh = mtod(m0, struct ether_header *);
   7187 	switch (htons(eh->ether_type)) {
   7188 	case ETHERTYPE_IP:
   7189 	case ETHERTYPE_IPV6:
   7190 		offset = ETHER_HDR_LEN;
   7191 		break;
   7192 
   7193 	case ETHERTYPE_VLAN:
   7194 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7195 		break;
   7196 
   7197 	default:
   7198 		/*
   7199 		 * Don't support this protocol or encapsulation.
   7200 		 */
   7201 		*fieldsp = 0;
   7202 		*cmdp = 0;
   7203 		return 0;
   7204 	}
   7205 
   7206 	if ((m0->m_pkthdr.csum_flags &
   7207 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7208 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7209 	} else
   7210 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7211 
   7212 	ipcse = offset + iphl - 1;
   7213 
   7214 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7215 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7216 	seg = 0;
   7217 	fields = 0;
   7218 
   7219 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7220 		int hlen = offset + iphl;
   7221 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7222 
   7223 		if (__predict_false(m0->m_len <
   7224 				    (hlen + sizeof(struct tcphdr)))) {
   7225 			/*
   7226 			 * TCP/IP headers are not in the first mbuf; we need
   7227 			 * to do this the slow and painful way. Let's just
   7228 			 * hope this doesn't happen very often.
   7229 			 */
   7230 			struct tcphdr th;
   7231 
   7232 			WM_Q_EVCNT_INCR(txq, tsopain);
   7233 
   7234 			m_copydata(m0, hlen, sizeof(th), &th);
   7235 			if (v4) {
   7236 				struct ip ip;
   7237 
   7238 				m_copydata(m0, offset, sizeof(ip), &ip);
   7239 				ip.ip_len = 0;
   7240 				m_copyback(m0,
   7241 				    offset + offsetof(struct ip, ip_len),
   7242 				    sizeof(ip.ip_len), &ip.ip_len);
   7243 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7244 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7245 			} else {
   7246 				struct ip6_hdr ip6;
   7247 
   7248 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7249 				ip6.ip6_plen = 0;
   7250 				m_copyback(m0,
   7251 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7252 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7253 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7254 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7255 			}
   7256 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7257 			    sizeof(th.th_sum), &th.th_sum);
   7258 
   7259 			hlen += th.th_off << 2;
   7260 		} else {
   7261 			/*
   7262 			 * TCP/IP headers are in the first mbuf; we can do
   7263 			 * this the easy way.
   7264 			 */
   7265 			struct tcphdr *th;
   7266 
   7267 			if (v4) {
   7268 				struct ip *ip =
   7269 				    (void *)(mtod(m0, char *) + offset);
   7270 				th = (void *)(mtod(m0, char *) + hlen);
   7271 
   7272 				ip->ip_len = 0;
   7273 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7274 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7275 			} else {
   7276 				struct ip6_hdr *ip6 =
   7277 				    (void *)(mtod(m0, char *) + offset);
   7278 				th = (void *)(mtod(m0, char *) + hlen);
   7279 
   7280 				ip6->ip6_plen = 0;
   7281 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7282 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7283 			}
   7284 			hlen += th->th_off << 2;
   7285 		}
   7286 
   7287 		if (v4) {
   7288 			WM_Q_EVCNT_INCR(txq, tso);
   7289 			cmdlen |= WTX_TCPIP_CMD_IP;
   7290 		} else {
   7291 			WM_Q_EVCNT_INCR(txq, tso6);
   7292 			ipcse = 0;
   7293 		}
   7294 		cmd |= WTX_TCPIP_CMD_TSE;
   7295 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7296 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7297 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7298 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
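		/*
		 * Example (hypothetical frame): for a plain IPv4/TCP TSO
		 * packet, hlen = 14 (Ethernet) + 20 (IP) + 20 (TCP) = 54,
		 * so the context descriptor carries HDRLEN 54 and the MSS
		 * (e.g. 1460), while cmdlen carries the TSO payload length
		 * (m_pkthdr.len - 54).
		 */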
   7299 	}
   7300 
   7301 	/*
   7302 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7303 	 * offload feature, if we load the context descriptor, we
   7304 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7305 	 */
   7306 
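	/*
	 * Example (hypothetical frame): for an untagged IPv4 packet,
	 * offset = 14 and iphl = 20, giving IPCSS = 14 (start of the IP
	 * header), IPCSO = 14 + offsetof(struct ip, ip_sum) = 24 and
	 * IPCSE = ipcse = 33 (last byte of the IP header).
	 */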
   7307 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7308 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7309 	    WTX_TCPIP_IPCSE(ipcse);
   7310 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7311 		WM_Q_EVCNT_INCR(txq, ipsum);
   7312 		fields |= WTX_IXSM;
   7313 	}
   7314 
   7315 	offset += iphl;
   7316 
   7317 	if (m0->m_pkthdr.csum_flags &
   7318 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7319 		WM_Q_EVCNT_INCR(txq, tusum);
   7320 		fields |= WTX_TXSM;
   7321 		tucs = WTX_TCPIP_TUCSS(offset) |
   7322 		    WTX_TCPIP_TUCSO(offset +
   7323 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7324 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7325 	} else if ((m0->m_pkthdr.csum_flags &
   7326 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7327 		WM_Q_EVCNT_INCR(txq, tusum6);
   7328 		fields |= WTX_TXSM;
   7329 		tucs = WTX_TCPIP_TUCSS(offset) |
   7330 		    WTX_TCPIP_TUCSO(offset +
   7331 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7332 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7333 	} else {
   7334 		/* Just initialize it to a valid TCP context. */
   7335 		tucs = WTX_TCPIP_TUCSS(offset) |
   7336 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7337 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7338 	}
   7339 
   7340 	/*
    7341 	 * We don't have to write a context descriptor for every packet,
    7342 	 * except on the 82574: there, a context descriptor must be written
    7343 	 * for every packet when two descriptor queues are used.
    7344 	 * Writing a context descriptor for every packet adds overhead,
    7345 	 * but it does not cause problems.
   7346 	 */
   7347 	/* Fill in the context descriptor. */
   7348 	t = (struct livengood_tcpip_ctxdesc *)
   7349 	    &txq->txq_descs[txq->txq_next];
   7350 	t->tcpip_ipcs = htole32(ipcs);
   7351 	t->tcpip_tucs = htole32(tucs);
   7352 	t->tcpip_cmdlen = htole32(cmdlen);
   7353 	t->tcpip_seg = htole32(seg);
   7354 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7355 
   7356 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7357 	txs->txs_ndesc++;
   7358 
   7359 	*cmdp = cmd;
   7360 	*fieldsp = fields;
   7361 
   7362 	return 0;
   7363 }
   7364 
   7365 static inline int
   7366 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7367 {
   7368 	struct wm_softc *sc = ifp->if_softc;
   7369 	u_int cpuid = cpu_index(curcpu());
   7370 
   7371 	/*
    7372 	 * Currently, a simple distribution strategy is used.
    7373 	 * TODO:
    7374 	 * Distribute by flowid (when RSS provides a value).
   7375 	 */
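	/*
	 * Example (hypothetical values): with ncpu = 8,
	 * sc_affinity_offset = 2 and sc_nqueues = 4, a send from CPU 5
	 * maps to ((5 + 8 - 2) % 8) % 4 = queue 3.  Adding ncpu before
	 * the subtraction keeps the intermediate value non-negative.
	 */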
   7376 	return ((cpuid + ncpu - sc->sc_affinity_offset) % ncpu) % sc->sc_nqueues;
   7377 }
   7378 
   7379 /*
   7380  * wm_start:		[ifnet interface function]
   7381  *
   7382  *	Start packet transmission on the interface.
   7383  */
   7384 static void
   7385 wm_start(struct ifnet *ifp)
   7386 {
   7387 	struct wm_softc *sc = ifp->if_softc;
   7388 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7389 
   7390 #ifdef WM_MPSAFE
   7391 	KASSERT(if_is_mpsafe(ifp));
   7392 #endif
   7393 	/*
   7394 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7395 	 */
   7396 
   7397 	mutex_enter(txq->txq_lock);
   7398 	if (!txq->txq_stopping)
   7399 		wm_start_locked(ifp);
   7400 	mutex_exit(txq->txq_lock);
   7401 }
   7402 
   7403 static void
   7404 wm_start_locked(struct ifnet *ifp)
   7405 {
   7406 	struct wm_softc *sc = ifp->if_softc;
   7407 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7408 
   7409 	wm_send_common_locked(ifp, txq, false);
   7410 }
   7411 
   7412 static int
   7413 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7414 {
   7415 	int qid;
   7416 	struct wm_softc *sc = ifp->if_softc;
   7417 	struct wm_txqueue *txq;
   7418 
   7419 	qid = wm_select_txqueue(ifp, m);
   7420 	txq = &sc->sc_queue[qid].wmq_txq;
   7421 
   7422 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7423 		m_freem(m);
   7424 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7425 		return ENOBUFS;
   7426 	}
   7427 
   7428 	/*
   7429 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7430 	 */
   7431 	ifp->if_obytes += m->m_pkthdr.len;
   7432 	if (m->m_flags & M_MCAST)
   7433 		ifp->if_omcasts++;
   7434 
   7435 	if (mutex_tryenter(txq->txq_lock)) {
   7436 		if (!txq->txq_stopping)
   7437 			wm_transmit_locked(ifp, txq);
   7438 		mutex_exit(txq->txq_lock);
   7439 	}
   7440 
   7441 	return 0;
   7442 }
   7443 
   7444 static void
   7445 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7446 {
   7447 
   7448 	wm_send_common_locked(ifp, txq, true);
   7449 }
   7450 
   7451 static void
   7452 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7453     bool is_transmit)
   7454 {
   7455 	struct wm_softc *sc = ifp->if_softc;
   7456 	struct mbuf *m0;
   7457 	struct wm_txsoft *txs;
   7458 	bus_dmamap_t dmamap;
   7459 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7460 	bus_addr_t curaddr;
   7461 	bus_size_t seglen, curlen;
   7462 	uint32_t cksumcmd;
   7463 	uint8_t cksumfields;
   7464 	bool remap = true;
   7465 
   7466 	KASSERT(mutex_owned(txq->txq_lock));
   7467 
   7468 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7469 		return;
   7470 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7471 		return;
   7472 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7473 		return;
   7474 
   7475 	/* Remember the previous number of free descriptors. */
   7476 	ofree = txq->txq_free;
   7477 
   7478 	/*
   7479 	 * Loop through the send queue, setting up transmit descriptors
   7480 	 * until we drain the queue, or use up all available transmit
   7481 	 * descriptors.
   7482 	 */
   7483 	for (;;) {
   7484 		m0 = NULL;
   7485 
   7486 		/* Get a work queue entry. */
   7487 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7488 			wm_txeof(txq, UINT_MAX);
   7489 			if (txq->txq_sfree == 0) {
   7490 				DPRINTF(WM_DEBUG_TX,
   7491 				    ("%s: TX: no free job descriptors\n",
   7492 					device_xname(sc->sc_dev)));
   7493 				WM_Q_EVCNT_INCR(txq, txsstall);
   7494 				break;
   7495 			}
   7496 		}
   7497 
   7498 		/* Grab a packet off the queue. */
   7499 		if (is_transmit)
   7500 			m0 = pcq_get(txq->txq_interq);
   7501 		else
   7502 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7503 		if (m0 == NULL)
   7504 			break;
   7505 
   7506 		DPRINTF(WM_DEBUG_TX,
   7507 		    ("%s: TX: have packet to transmit: %p\n",
   7508 			device_xname(sc->sc_dev), m0));
   7509 
   7510 		txs = &txq->txq_soft[txq->txq_snext];
   7511 		dmamap = txs->txs_dmamap;
   7512 
   7513 		use_tso = (m0->m_pkthdr.csum_flags &
   7514 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7515 
   7516 		/*
   7517 		 * So says the Linux driver:
   7518 		 * The controller does a simple calculation to make sure
   7519 		 * there is enough room in the FIFO before initiating the
   7520 		 * DMA for each buffer. The calc is:
   7521 		 *	4 = ceil(buffer len / MSS)
   7522 		 * To make sure we don't overrun the FIFO, adjust the max
   7523 		 * buffer len if the MSS drops.
   7524 		 */
   7525 		dmamap->dm_maxsegsz =
   7526 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7527 		    ? m0->m_pkthdr.segsz << 2
   7528 		    : WTX_MAX_LEN;
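		/*
		 * Example (hypothetical MSS): for a TSO packet with
		 * segsz = 1460, dm_maxsegsz is capped at 1460 << 2 = 5840
		 * bytes, i.e. 4 * MSS per buffer as described above;
		 * otherwise WTX_MAX_LEN is used.
		 */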
   7529 
   7530 		/*
   7531 		 * Load the DMA map.  If this fails, the packet either
   7532 		 * didn't fit in the allotted number of segments, or we
   7533 		 * were short on resources.  For the too-many-segments
   7534 		 * case, we simply report an error and drop the packet,
   7535 		 * since we can't sanely copy a jumbo packet to a single
   7536 		 * buffer.
   7537 		 */
   7538 retry:
   7539 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7540 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7541 		if (__predict_false(error)) {
   7542 			if (error == EFBIG) {
   7543 				if (remap == true) {
   7544 					struct mbuf *m;
   7545 
   7546 					remap = false;
   7547 					m = m_defrag(m0, M_NOWAIT);
   7548 					if (m != NULL) {
   7549 						WM_Q_EVCNT_INCR(txq, defrag);
   7550 						m0 = m;
   7551 						goto retry;
   7552 					}
   7553 				}
   7554 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7555 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7556 				    "DMA segments, dropping...\n",
   7557 				    device_xname(sc->sc_dev));
   7558 				wm_dump_mbuf_chain(sc, m0);
   7559 				m_freem(m0);
   7560 				continue;
   7561 			}
   7562 			/*  Short on resources, just stop for now. */
   7563 			DPRINTF(WM_DEBUG_TX,
   7564 			    ("%s: TX: dmamap load failed: %d\n",
   7565 				device_xname(sc->sc_dev), error));
   7566 			break;
   7567 		}
   7568 
   7569 		segs_needed = dmamap->dm_nsegs;
   7570 		if (use_tso) {
   7571 			/* For sentinel descriptor; see below. */
   7572 			segs_needed++;
   7573 		}
   7574 
   7575 		/*
   7576 		 * Ensure we have enough descriptors free to describe
   7577 		 * the packet. Note, we always reserve one descriptor
   7578 		 * at the end of the ring due to the semantics of the
   7579 		 * TDT register, plus one more in the event we need
   7580 		 * to load offload context.
   7581 		 */
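		/*
		 * Example (hypothetical sizes): a TSO packet mapped to 6
		 * DMA segments needs 7 descriptors including the sentinel;
		 * with the 2 reserved descriptors the packet is only sent
		 * when txq_free is at least 9.
		 */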
   7582 		if (segs_needed > txq->txq_free - 2) {
   7583 			/*
   7584 			 * Not enough free descriptors to transmit this
   7585 			 * packet.  We haven't committed anything yet,
   7586 			 * so just unload the DMA map, put the packet
    7587 			 * back on the queue, and punt. Notify the upper
   7588 			 * layer that there are no more slots left.
   7589 			 */
   7590 			DPRINTF(WM_DEBUG_TX,
   7591 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7592 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7593 				segs_needed, txq->txq_free - 1));
   7594 			if (!is_transmit)
   7595 				ifp->if_flags |= IFF_OACTIVE;
   7596 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7597 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7598 			WM_Q_EVCNT_INCR(txq, txdstall);
   7599 			break;
   7600 		}
   7601 
   7602 		/*
   7603 		 * Check for 82547 Tx FIFO bug. We need to do this
   7604 		 * once we know we can transmit the packet, since we
   7605 		 * do some internal FIFO space accounting here.
   7606 		 */
   7607 		if (sc->sc_type == WM_T_82547 &&
   7608 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7609 			DPRINTF(WM_DEBUG_TX,
   7610 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7611 				device_xname(sc->sc_dev)));
   7612 			if (!is_transmit)
   7613 				ifp->if_flags |= IFF_OACTIVE;
   7614 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7615 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7616 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7617 			break;
   7618 		}
   7619 
   7620 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7621 
   7622 		DPRINTF(WM_DEBUG_TX,
   7623 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7624 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7625 
   7626 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7627 
   7628 		/*
   7629 		 * Store a pointer to the packet so that we can free it
   7630 		 * later.
   7631 		 *
   7632 		 * Initially, we consider the number of descriptors the
   7633 		 * packet uses the number of DMA segments.  This may be
   7634 		 * incremented by 1 if we do checksum offload (a descriptor
   7635 		 * is used to set the checksum context).
   7636 		 */
   7637 		txs->txs_mbuf = m0;
   7638 		txs->txs_firstdesc = txq->txq_next;
   7639 		txs->txs_ndesc = segs_needed;
   7640 
   7641 		/* Set up offload parameters for this packet. */
   7642 		if (m0->m_pkthdr.csum_flags &
   7643 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7644 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7645 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7646 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7647 					  &cksumfields) != 0) {
   7648 				/* Error message already displayed. */
   7649 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7650 				continue;
   7651 			}
   7652 		} else {
   7653 			cksumcmd = 0;
   7654 			cksumfields = 0;
   7655 		}
   7656 
   7657 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7658 
   7659 		/* Sync the DMA map. */
   7660 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7661 		    BUS_DMASYNC_PREWRITE);
   7662 
   7663 		/* Initialize the transmit descriptor. */
   7664 		for (nexttx = txq->txq_next, seg = 0;
   7665 		     seg < dmamap->dm_nsegs; seg++) {
   7666 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7667 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7668 			     seglen != 0;
   7669 			     curaddr += curlen, seglen -= curlen,
   7670 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7671 				curlen = seglen;
   7672 
   7673 				/*
   7674 				 * So says the Linux driver:
   7675 				 * Work around for premature descriptor
   7676 				 * write-backs in TSO mode.  Append a
   7677 				 * 4-byte sentinel descriptor.
   7678 				 */
   7679 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7680 				    curlen > 8)
   7681 					curlen -= 4;
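				/*
				 * Trimming 4 bytes here leaves a 4-byte
				 * remainder in seglen, so the loop emits
				 * one extra descriptor for it: the
				 * sentinel accounted for in segs_needed.
				 */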
   7682 
   7683 				wm_set_dma_addr(
   7684 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7685 				txq->txq_descs[nexttx].wtx_cmdlen
   7686 				    = htole32(cksumcmd | curlen);
   7687 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7688 				    = 0;
   7689 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7690 				    = cksumfields;
    7691 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7692 				lasttx = nexttx;
   7693 
   7694 				DPRINTF(WM_DEBUG_TX,
   7695 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7696 					"len %#04zx\n",
   7697 					device_xname(sc->sc_dev), nexttx,
   7698 					(uint64_t)curaddr, curlen));
   7699 			}
   7700 		}
   7701 
   7702 		KASSERT(lasttx != -1);
   7703 
   7704 		/*
   7705 		 * Set up the command byte on the last descriptor of
   7706 		 * the packet. If we're in the interrupt delay window,
   7707 		 * delay the interrupt.
   7708 		 */
   7709 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7710 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7711 
   7712 		/*
   7713 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7714 		 * up the descriptor to encapsulate the packet for us.
   7715 		 *
   7716 		 * This is only valid on the last descriptor of the packet.
   7717 		 */
   7718 		if (vlan_has_tag(m0)) {
   7719 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7720 			    htole32(WTX_CMD_VLE);
   7721 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7722 			    = htole16(vlan_get_tag(m0));
   7723 		}
   7724 
   7725 		txs->txs_lastdesc = lasttx;
   7726 
   7727 		DPRINTF(WM_DEBUG_TX,
   7728 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7729 			device_xname(sc->sc_dev),
   7730 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7731 
   7732 		/* Sync the descriptors we're using. */
   7733 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7734 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7735 
   7736 		/* Give the packet to the chip. */
   7737 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7738 
   7739 		DPRINTF(WM_DEBUG_TX,
   7740 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7741 
   7742 		DPRINTF(WM_DEBUG_TX,
   7743 		    ("%s: TX: finished transmitting packet, job %d\n",
   7744 			device_xname(sc->sc_dev), txq->txq_snext));
   7745 
   7746 		/* Advance the tx pointer. */
   7747 		txq->txq_free -= txs->txs_ndesc;
   7748 		txq->txq_next = nexttx;
   7749 
   7750 		txq->txq_sfree--;
   7751 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7752 
   7753 		/* Pass the packet to any BPF listeners. */
   7754 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7755 	}
   7756 
   7757 	if (m0 != NULL) {
   7758 		if (!is_transmit)
   7759 			ifp->if_flags |= IFF_OACTIVE;
   7760 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7761 		WM_Q_EVCNT_INCR(txq, descdrop);
   7762 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7763 			__func__));
   7764 		m_freem(m0);
   7765 	}
   7766 
   7767 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7768 		/* No more slots; notify upper layer. */
   7769 		if (!is_transmit)
   7770 			ifp->if_flags |= IFF_OACTIVE;
   7771 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7772 	}
   7773 
   7774 	if (txq->txq_free != ofree) {
   7775 		/* Set a watchdog timer in case the chip flakes out. */
   7776 		txq->txq_lastsent = time_uptime;
   7777 		txq->txq_sending = true;
   7778 	}
   7779 }
   7780 
   7781 /*
   7782  * wm_nq_tx_offload:
   7783  *
   7784  *	Set up TCP/IP checksumming parameters for the
   7785  *	specified packet, for NEWQUEUE devices
   7786  */
   7787 static int
   7788 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7789     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7790 {
   7791 	struct mbuf *m0 = txs->txs_mbuf;
   7792 	uint32_t vl_len, mssidx, cmdc;
   7793 	struct ether_header *eh;
   7794 	int offset, iphl;
   7795 
   7796 	/*
   7797 	 * XXX It would be nice if the mbuf pkthdr had offset
   7798 	 * fields for the protocol headers.
   7799 	 */
   7800 	*cmdlenp = 0;
   7801 	*fieldsp = 0;
   7802 
   7803 	eh = mtod(m0, struct ether_header *);
   7804 	switch (htons(eh->ether_type)) {
   7805 	case ETHERTYPE_IP:
   7806 	case ETHERTYPE_IPV6:
   7807 		offset = ETHER_HDR_LEN;
   7808 		break;
   7809 
   7810 	case ETHERTYPE_VLAN:
   7811 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7812 		break;
   7813 
   7814 	default:
   7815 		/* Don't support this protocol or encapsulation. */
   7816 		*do_csum = false;
   7817 		return 0;
   7818 	}
   7819 	*do_csum = true;
   7820 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7821 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7822 
   7823 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7824 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7825 
   7826 	if ((m0->m_pkthdr.csum_flags &
   7827 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7828 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7829 	} else {
   7830 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7831 	}
   7832 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7833 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7834 
   7835 	if (vlan_has_tag(m0)) {
   7836 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7837 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7838 		*cmdlenp |= NQTX_CMD_VLE;
   7839 	}
   7840 
   7841 	mssidx = 0;
   7842 
   7843 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7844 		int hlen = offset + iphl;
   7845 		int tcp_hlen;
   7846 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7847 
   7848 		if (__predict_false(m0->m_len <
   7849 				    (hlen + sizeof(struct tcphdr)))) {
   7850 			/*
   7851 			 * TCP/IP headers are not in the first mbuf; we need
   7852 			 * to do this the slow and painful way. Let's just
   7853 			 * hope this doesn't happen very often.
   7854 			 */
   7855 			struct tcphdr th;
   7856 
   7857 			WM_Q_EVCNT_INCR(txq, tsopain);
   7858 
   7859 			m_copydata(m0, hlen, sizeof(th), &th);
   7860 			if (v4) {
   7861 				struct ip ip;
   7862 
   7863 				m_copydata(m0, offset, sizeof(ip), &ip);
   7864 				ip.ip_len = 0;
   7865 				m_copyback(m0,
   7866 				    offset + offsetof(struct ip, ip_len),
   7867 				    sizeof(ip.ip_len), &ip.ip_len);
   7868 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7869 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7870 			} else {
   7871 				struct ip6_hdr ip6;
   7872 
   7873 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7874 				ip6.ip6_plen = 0;
   7875 				m_copyback(m0,
   7876 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7877 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7878 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7879 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7880 			}
   7881 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7882 			    sizeof(th.th_sum), &th.th_sum);
   7883 
   7884 			tcp_hlen = th.th_off << 2;
   7885 		} else {
   7886 			/*
   7887 			 * TCP/IP headers are in the first mbuf; we can do
   7888 			 * this the easy way.
   7889 			 */
   7890 			struct tcphdr *th;
   7891 
   7892 			if (v4) {
   7893 				struct ip *ip =
   7894 				    (void *)(mtod(m0, char *) + offset);
   7895 				th = (void *)(mtod(m0, char *) + hlen);
   7896 
   7897 				ip->ip_len = 0;
   7898 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7899 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7900 			} else {
   7901 				struct ip6_hdr *ip6 =
   7902 				    (void *)(mtod(m0, char *) + offset);
   7903 				th = (void *)(mtod(m0, char *) + hlen);
   7904 
   7905 				ip6->ip6_plen = 0;
   7906 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7907 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7908 			}
   7909 			tcp_hlen = th->th_off << 2;
   7910 		}
   7911 		hlen += tcp_hlen;
   7912 		*cmdlenp |= NQTX_CMD_TSE;
   7913 
   7914 		if (v4) {
   7915 			WM_Q_EVCNT_INCR(txq, tso);
   7916 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7917 		} else {
   7918 			WM_Q_EVCNT_INCR(txq, tso6);
   7919 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7920 		}
    7921 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    7922 		KASSERT(((m0->m_pkthdr.len - hlen)
         		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7923 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7924 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7925 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7926 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7927 	} else {
   7928 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7929 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7930 	}
   7931 
   7932 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7933 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7934 		cmdc |= NQTXC_CMD_IP4;
   7935 	}
   7936 
   7937 	if (m0->m_pkthdr.csum_flags &
   7938 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7939 		WM_Q_EVCNT_INCR(txq, tusum);
   7940 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7941 			cmdc |= NQTXC_CMD_TCP;
   7942 		else
   7943 			cmdc |= NQTXC_CMD_UDP;
   7944 
   7945 		cmdc |= NQTXC_CMD_IP4;
   7946 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7947 	}
   7948 	if (m0->m_pkthdr.csum_flags &
   7949 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7950 		WM_Q_EVCNT_INCR(txq, tusum6);
   7951 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7952 			cmdc |= NQTXC_CMD_TCP;
   7953 		else
   7954 			cmdc |= NQTXC_CMD_UDP;
   7955 
   7956 		cmdc |= NQTXC_CMD_IP6;
   7957 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7958 	}
   7959 
    7960 	/*
    7961 	 * On NEWQUEUE controllers (82575, 82576, 82580, I350, I354,
    7962 	 * I210 and I211) we don't have to write a context descriptor
    7963 	 * for every packet; writing one per Tx queue would be enough
    7964 	 * for these controllers.
    7965 	 * Writing one per packet adds a little overhead, but it does
    7966 	 * not cause problems.
    7967 	 */
   7968 	/* Fill in the context descriptor. */
   7969 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7970 	    htole32(vl_len);
   7971 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7972 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7973 	    htole32(cmdc);
   7974 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7975 	    htole32(mssidx);
   7976 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7977 	DPRINTF(WM_DEBUG_TX,
   7978 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7979 		txq->txq_next, 0, vl_len));
   7980 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7981 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7982 	txs->txs_ndesc++;
   7983 	return 0;
   7984 }
   7985 
   7986 /*
   7987  * wm_nq_start:		[ifnet interface function]
   7988  *
   7989  *	Start packet transmission on the interface for NEWQUEUE devices
   7990  */
   7991 static void
   7992 wm_nq_start(struct ifnet *ifp)
   7993 {
   7994 	struct wm_softc *sc = ifp->if_softc;
   7995 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7996 
   7997 #ifdef WM_MPSAFE
   7998 	KASSERT(if_is_mpsafe(ifp));
   7999 #endif
   8000 	/*
   8001 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   8002 	 */
   8003 
   8004 	mutex_enter(txq->txq_lock);
   8005 	if (!txq->txq_stopping)
   8006 		wm_nq_start_locked(ifp);
   8007 	mutex_exit(txq->txq_lock);
   8008 }
   8009 
   8010 static void
   8011 wm_nq_start_locked(struct ifnet *ifp)
   8012 {
   8013 	struct wm_softc *sc = ifp->if_softc;
   8014 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8015 
   8016 	wm_nq_send_common_locked(ifp, txq, false);
   8017 }
   8018 
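         /*
          * wm_nq_transmit:	[ifnet interface function]
          *
          *	if_transmit entry point for NEWQUEUE devices. Select a Tx queue
          *	for the packet, enqueue it, and try to send it immediately if
          *	the queue lock is uncontended.
          */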
   8019 static int
   8020 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   8021 {
   8022 	int qid;
   8023 	struct wm_softc *sc = ifp->if_softc;
   8024 	struct wm_txqueue *txq;
   8025 
   8026 	qid = wm_select_txqueue(ifp, m);
   8027 	txq = &sc->sc_queue[qid].wmq_txq;
   8028 
   8029 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   8030 		m_freem(m);
   8031 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   8032 		return ENOBUFS;
   8033 	}
   8034 
   8035 	/*
   8036 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   8037 	 */
   8038 	ifp->if_obytes += m->m_pkthdr.len;
   8039 	if (m->m_flags & M_MCAST)
   8040 		ifp->if_omcasts++;
   8041 
    8042 	/*
    8043 	 * There are two situations in which this mutex_tryenter() can
    8044 	 * fail at run time:
    8045 	 *     (1) contention with the interrupt handler
    8046 	 *         (wm_txrxintr_msix())
    8047 	 *     (2) contention with the deferred if_start softint
    8048 	 *         (wm_handle_queue())
    8049 	 * In either case, the last packet enqueued to txq->txq_interq
    8050 	 * is eventually dequeued by wm_deferred_start_locked(), so it
    8051 	 * does not get stuck.
    8052 	 */
   8053 	if (mutex_tryenter(txq->txq_lock)) {
   8054 		if (!txq->txq_stopping)
   8055 			wm_nq_transmit_locked(ifp, txq);
   8056 		mutex_exit(txq->txq_lock);
   8057 	}
   8058 
   8059 	return 0;
   8060 }
   8061 
   8062 static void
   8063 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   8064 {
   8065 
   8066 	wm_nq_send_common_locked(ifp, txq, true);
   8067 }
   8068 
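         /*
          * wm_nq_send_common_locked:
          *
          *	Common transmit path for wm_nq_start_locked() and
          *	wm_nq_transmit_locked(): dequeue packets, load them for DMA
          *	(defragmenting once on EFBIG), set up any offload context,
          *	fill the descriptor ring and notify the chip via TDT.
          */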
   8069 static void
   8070 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   8071     bool is_transmit)
   8072 {
   8073 	struct wm_softc *sc = ifp->if_softc;
   8074 	struct mbuf *m0;
   8075 	struct wm_txsoft *txs;
   8076 	bus_dmamap_t dmamap;
   8077 	int error, nexttx, lasttx = -1, seg, segs_needed;
   8078 	bool do_csum, sent;
   8079 	bool remap = true;
   8080 
   8081 	KASSERT(mutex_owned(txq->txq_lock));
   8082 
   8083 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   8084 		return;
   8085 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   8086 		return;
   8087 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   8088 		return;
   8089 
   8090 	sent = false;
   8091 
   8092 	/*
   8093 	 * Loop through the send queue, setting up transmit descriptors
   8094 	 * until we drain the queue, or use up all available transmit
   8095 	 * descriptors.
   8096 	 */
   8097 	for (;;) {
   8098 		m0 = NULL;
   8099 
   8100 		/* Get a work queue entry. */
   8101 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   8102 			wm_txeof(txq, UINT_MAX);
   8103 			if (txq->txq_sfree == 0) {
   8104 				DPRINTF(WM_DEBUG_TX,
   8105 				    ("%s: TX: no free job descriptors\n",
   8106 					device_xname(sc->sc_dev)));
   8107 				WM_Q_EVCNT_INCR(txq, txsstall);
   8108 				break;
   8109 			}
   8110 		}
   8111 
   8112 		/* Grab a packet off the queue. */
   8113 		if (is_transmit)
   8114 			m0 = pcq_get(txq->txq_interq);
   8115 		else
   8116 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   8117 		if (m0 == NULL)
   8118 			break;
   8119 
   8120 		DPRINTF(WM_DEBUG_TX,
   8121 		    ("%s: TX: have packet to transmit: %p\n",
   8122 		    device_xname(sc->sc_dev), m0));
   8123 
   8124 		txs = &txq->txq_soft[txq->txq_snext];
   8125 		dmamap = txs->txs_dmamap;
   8126 
    8127 		/*
    8128 		 * Load the DMA map.  If this fails, the packet either
    8129 		 * didn't fit in the allotted number of segments, or we
    8130 		 * were short on resources.  In the too-many-segments
    8131 		 * case we try m_defrag() once and, if that also fails,
    8132 		 * report an error and drop the packet, since we can't
    8133 		 * sanely copy a jumbo packet to a single buffer.
    8134 		 */
   8135 retry:
   8136 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   8137 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   8138 		if (__predict_false(error)) {
   8139 			if (error == EFBIG) {
   8140 				if (remap == true) {
   8141 					struct mbuf *m;
   8142 
   8143 					remap = false;
   8144 					m = m_defrag(m0, M_NOWAIT);
   8145 					if (m != NULL) {
   8146 						WM_Q_EVCNT_INCR(txq, defrag);
   8147 						m0 = m;
   8148 						goto retry;
   8149 					}
   8150 				}
   8151 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8152 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8153 				    "DMA segments, dropping...\n",
   8154 				    device_xname(sc->sc_dev));
   8155 				wm_dump_mbuf_chain(sc, m0);
   8156 				m_freem(m0);
   8157 				continue;
   8158 			}
   8159 			/* Short on resources, just stop for now. */
   8160 			DPRINTF(WM_DEBUG_TX,
   8161 			    ("%s: TX: dmamap load failed: %d\n",
   8162 				device_xname(sc->sc_dev), error));
   8163 			break;
   8164 		}
   8165 
   8166 		segs_needed = dmamap->dm_nsegs;
   8167 
   8168 		/*
   8169 		 * Ensure we have enough descriptors free to describe
   8170 		 * the packet. Note, we always reserve one descriptor
   8171 		 * at the end of the ring due to the semantics of the
   8172 		 * TDT register, plus one more in the event we need
   8173 		 * to load offload context.
   8174 		 */
   8175 		if (segs_needed > txq->txq_free - 2) {
    8176 			/*
    8177 			 * Not enough free descriptors to transmit this
    8178 			 * packet.  We haven't committed anything yet,
    8179 			 * so just unload the DMA map and punt; the
    8180 			 * packet is dropped below. Notify the upper
    8181 			 * layer that there are no more slots left.
    8182 			 */
   8183 			DPRINTF(WM_DEBUG_TX,
   8184 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8185 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8186 				segs_needed, txq->txq_free - 1));
   8187 			if (!is_transmit)
   8188 				ifp->if_flags |= IFF_OACTIVE;
   8189 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8190 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8191 			WM_Q_EVCNT_INCR(txq, txdstall);
   8192 			break;
   8193 		}
   8194 
   8195 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8196 
   8197 		DPRINTF(WM_DEBUG_TX,
   8198 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8199 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8200 
   8201 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8202 
   8203 		/*
   8204 		 * Store a pointer to the packet so that we can free it
   8205 		 * later.
   8206 		 *
    8207 		 * Initially, we take the number of descriptors the
    8208 		 * packet uses to be the number of DMA segments.  This may
    8209 		 * grow by 1 if we do checksum offload (a descriptor is
    8210 		 * used to set the checksum context).
   8211 		 */
   8212 		txs->txs_mbuf = m0;
   8213 		txs->txs_firstdesc = txq->txq_next;
   8214 		txs->txs_ndesc = segs_needed;
   8215 
   8216 		/* Set up offload parameters for this packet. */
   8217 		uint32_t cmdlen, fields, dcmdlen;
   8218 		if (m0->m_pkthdr.csum_flags &
   8219 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8220 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8221 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8222 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8223 			    &do_csum) != 0) {
   8224 				/* Error message already displayed. */
   8225 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8226 				continue;
   8227 			}
   8228 		} else {
   8229 			do_csum = false;
   8230 			cmdlen = 0;
   8231 			fields = 0;
   8232 		}
   8233 
   8234 		/* Sync the DMA map. */
   8235 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8236 		    BUS_DMASYNC_PREWRITE);
   8237 
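         		/*
         		 * Without offload a legacy Tx descriptor is sufficient;
         		 * with offload we use the advanced (DEXT) data descriptor
         		 * that carries the cmdlen and fields values set up above.
         		 */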
   8238 		/* Initialize the first transmit descriptor. */
   8239 		nexttx = txq->txq_next;
   8240 		if (!do_csum) {
   8241 			/* setup a legacy descriptor */
   8242 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8243 			    dmamap->dm_segs[0].ds_addr);
   8244 			txq->txq_descs[nexttx].wtx_cmdlen =
   8245 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8246 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8247 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8248 			if (vlan_has_tag(m0)) {
   8249 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8250 				    htole32(WTX_CMD_VLE);
   8251 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8252 				    htole16(vlan_get_tag(m0));
   8253 			} else
   8254 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   8255 
   8256 			dcmdlen = 0;
   8257 		} else {
   8258 			/* setup an advanced data descriptor */
   8259 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8260 			    htole64(dmamap->dm_segs[0].ds_addr);
   8261 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8262 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8263 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8264 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8265 			    htole32(fields);
   8266 			DPRINTF(WM_DEBUG_TX,
   8267 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8268 				device_xname(sc->sc_dev), nexttx,
   8269 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8270 			DPRINTF(WM_DEBUG_TX,
   8271 			    ("\t 0x%08x%08x\n", fields,
   8272 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8273 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8274 		}
   8275 
   8276 		lasttx = nexttx;
   8277 		nexttx = WM_NEXTTX(txq, nexttx);
    8278 		/*
    8279 		 * Fill in the next descriptors. The legacy and advanced
    8280 		 * formats are laid out the same way here.
    8281 		 */
   8282 		for (seg = 1; seg < dmamap->dm_nsegs;
   8283 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8284 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8285 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8286 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8287 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8288 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8289 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8290 			lasttx = nexttx;
   8291 
   8292 			DPRINTF(WM_DEBUG_TX,
   8293 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8294 				device_xname(sc->sc_dev), nexttx,
   8295 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8296 				dmamap->dm_segs[seg].ds_len));
   8297 		}
   8298 
   8299 		KASSERT(lasttx != -1);
   8300 
   8301 		/*
   8302 		 * Set up the command byte on the last descriptor of
   8303 		 * the packet. If we're in the interrupt delay window,
   8304 		 * delay the interrupt.
   8305 		 */
   8306 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8307 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8308 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8309 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8310 
   8311 		txs->txs_lastdesc = lasttx;
   8312 
   8313 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8314 		    device_xname(sc->sc_dev),
   8315 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8316 
   8317 		/* Sync the descriptors we're using. */
   8318 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8319 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8320 
   8321 		/* Give the packet to the chip. */
   8322 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8323 		sent = true;
   8324 
   8325 		DPRINTF(WM_DEBUG_TX,
   8326 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8327 
   8328 		DPRINTF(WM_DEBUG_TX,
   8329 		    ("%s: TX: finished transmitting packet, job %d\n",
   8330 			device_xname(sc->sc_dev), txq->txq_snext));
   8331 
   8332 		/* Advance the tx pointer. */
   8333 		txq->txq_free -= txs->txs_ndesc;
   8334 		txq->txq_next = nexttx;
   8335 
   8336 		txq->txq_sfree--;
   8337 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8338 
   8339 		/* Pass the packet to any BPF listeners. */
   8340 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8341 	}
   8342 
   8343 	if (m0 != NULL) {
   8344 		if (!is_transmit)
   8345 			ifp->if_flags |= IFF_OACTIVE;
   8346 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8347 		WM_Q_EVCNT_INCR(txq, descdrop);
   8348 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8349 			__func__));
   8350 		m_freem(m0);
   8351 	}
   8352 
   8353 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8354 		/* No more slots; notify upper layer. */
   8355 		if (!is_transmit)
   8356 			ifp->if_flags |= IFF_OACTIVE;
   8357 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8358 	}
   8359 
   8360 	if (sent) {
   8361 		/* Set a watchdog timer in case the chip flakes out. */
   8362 		txq->txq_lastsent = time_uptime;
   8363 		txq->txq_sending = true;
   8364 	}
   8365 }
   8366 
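         /*
          * wm_deferred_start_locked:
          *
          *	Restart packet transmission from softint context; called from
          *	wm_handle_queue() with txq_lock held.
          */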
   8367 static void
   8368 wm_deferred_start_locked(struct wm_txqueue *txq)
   8369 {
   8370 	struct wm_softc *sc = txq->txq_sc;
   8371 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8372 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8373 	int qid = wmq->wmq_id;
   8374 
   8375 	KASSERT(mutex_owned(txq->txq_lock));
   8376 
   8377 	if (txq->txq_stopping) {
   8378 		mutex_exit(txq->txq_lock);
   8379 		return;
   8380 	}
   8381 
   8382 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   8383 		/* XXX need for ALTQ or one CPU system */
   8384 		if (qid == 0)
   8385 			wm_nq_start_locked(ifp);
   8386 		wm_nq_transmit_locked(ifp, txq);
   8387 	} else {
   8388 		/* XXX need for ALTQ or one CPU system */
   8389 		if (qid == 0)
   8390 			wm_start_locked(ifp);
   8391 		wm_transmit_locked(ifp, txq);
   8392 	}
   8393 }
   8394 
   8395 /* Interrupt */
   8396 
   8397 /*
   8398  * wm_txeof:
   8399  *
   8400  *	Helper; handle transmit interrupts.
   8401  */
   8402 static bool
   8403 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8404 {
   8405 	struct wm_softc *sc = txq->txq_sc;
   8406 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8407 	struct wm_txsoft *txs;
   8408 	int count = 0;
   8409 	int i;
   8410 	uint8_t status;
   8411 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8412 	bool more = false;
   8413 
   8414 	KASSERT(mutex_owned(txq->txq_lock));
   8415 
   8416 	if (txq->txq_stopping)
   8417 		return false;
   8418 
   8419 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8420 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers. */
   8421 	if (wmq->wmq_id == 0)
   8422 		ifp->if_flags &= ~IFF_OACTIVE;
   8423 
   8424 	/*
   8425 	 * Go through the Tx list and free mbufs for those
   8426 	 * frames which have been transmitted.
   8427 	 */
   8428 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8429 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8430 		if (limit-- == 0) {
   8431 			more = true;
   8432 			DPRINTF(WM_DEBUG_TX,
   8433 			    ("%s: TX: loop limited, job %d is not processed\n",
   8434 				device_xname(sc->sc_dev), i));
   8435 			break;
   8436 		}
   8437 
   8438 		txs = &txq->txq_soft[i];
   8439 
   8440 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8441 			device_xname(sc->sc_dev), i));
   8442 
   8443 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8444 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8445 
   8446 		status =
   8447 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8448 		if ((status & WTX_ST_DD) == 0) {
   8449 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8450 			    BUS_DMASYNC_PREREAD);
   8451 			break;
   8452 		}
   8453 
   8454 		count++;
   8455 		DPRINTF(WM_DEBUG_TX,
   8456 		    ("%s: TX: job %d done: descs %d..%d\n",
   8457 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8458 		    txs->txs_lastdesc));
   8459 
   8460 		/*
   8461 		 * XXX We should probably be using the statistics
   8462 		 * XXX registers, but I don't know if they exist
   8463 		 * XXX on chips before the i82544.
   8464 		 */
   8465 
   8466 #ifdef WM_EVENT_COUNTERS
   8467 		if (status & WTX_ST_TU)
   8468 			WM_Q_EVCNT_INCR(txq, underrun);
   8469 #endif /* WM_EVENT_COUNTERS */
   8470 
    8471 		/*
    8472 		 * Documents for the 82574 and newer say the status field has
    8473 		 * neither the EC (Excessive Collision) bit nor the LC (Late
    8474 		 * Collision) bit; both are reserved. See the "PCIe GbE
    8475 		 * Controller Open Source Software Developer's Manual", the
    8476 		 * 82574 datasheet and newer.
    8477 		 * XXX I saw the LC bit set on an I218 even though the media
    8478 		 * was full duplex, so it might have some other meaning
    8479 		 * (I have no documentation for it).
    8480 		 */
   8481 
   8482 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8483 		    && ((sc->sc_type < WM_T_82574)
   8484 			|| (sc->sc_type == WM_T_80003))) {
   8485 			ifp->if_oerrors++;
   8486 			if (status & WTX_ST_LC)
   8487 				log(LOG_WARNING, "%s: late collision\n",
   8488 				    device_xname(sc->sc_dev));
   8489 			else if (status & WTX_ST_EC) {
   8490 				ifp->if_collisions +=
   8491 				    TX_COLLISION_THRESHOLD + 1;
   8492 				log(LOG_WARNING, "%s: excessive collisions\n",
   8493 				    device_xname(sc->sc_dev));
   8494 			}
   8495 		} else
   8496 			ifp->if_opackets++;
   8497 
   8498 		txq->txq_packets++;
   8499 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8500 
   8501 		txq->txq_free += txs->txs_ndesc;
   8502 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8503 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8504 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8505 		m_freem(txs->txs_mbuf);
   8506 		txs->txs_mbuf = NULL;
   8507 	}
   8508 
   8509 	/* Update the dirty transmit buffer pointer. */
   8510 	txq->txq_sdirty = i;
   8511 	DPRINTF(WM_DEBUG_TX,
   8512 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8513 
   8514 	if (count != 0)
   8515 		rnd_add_uint32(&sc->rnd_source, count);
   8516 
   8517 	/*
   8518 	 * If there are no more pending transmissions, cancel the watchdog
   8519 	 * timer.
   8520 	 */
   8521 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8522 		txq->txq_sending = false;
   8523 
   8524 	return more;
   8525 }
   8526 
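         /*
          * The wm_rxdesc_* helpers below hide the three receive descriptor
          * layouts from the rest of the driver: the 82574 uses extended (erx)
          * descriptors, NEWQUEUE devices use advanced (nqrx) descriptors, and
          * all other chips use the legacy (wrx) format.
          */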
   8527 static inline uint32_t
   8528 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8529 {
   8530 	struct wm_softc *sc = rxq->rxq_sc;
   8531 
   8532 	if (sc->sc_type == WM_T_82574)
   8533 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8534 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8535 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8536 	else
   8537 		return rxq->rxq_descs[idx].wrx_status;
   8538 }
   8539 
   8540 static inline uint32_t
   8541 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8542 {
   8543 	struct wm_softc *sc = rxq->rxq_sc;
   8544 
   8545 	if (sc->sc_type == WM_T_82574)
   8546 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8547 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8548 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8549 	else
   8550 		return rxq->rxq_descs[idx].wrx_errors;
   8551 }
   8552 
   8553 static inline uint16_t
   8554 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8555 {
   8556 	struct wm_softc *sc = rxq->rxq_sc;
   8557 
   8558 	if (sc->sc_type == WM_T_82574)
   8559 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8560 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8561 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8562 	else
   8563 		return rxq->rxq_descs[idx].wrx_special;
   8564 }
   8565 
   8566 static inline int
   8567 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8568 {
   8569 	struct wm_softc *sc = rxq->rxq_sc;
   8570 
   8571 	if (sc->sc_type == WM_T_82574)
   8572 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8573 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8574 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8575 	else
   8576 		return rxq->rxq_descs[idx].wrx_len;
   8577 }
   8578 
   8579 #ifdef WM_DEBUG
   8580 static inline uint32_t
   8581 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8582 {
   8583 	struct wm_softc *sc = rxq->rxq_sc;
   8584 
   8585 	if (sc->sc_type == WM_T_82574)
   8586 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8587 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8588 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8589 	else
   8590 		return 0;
   8591 }
   8592 
   8593 static inline uint8_t
   8594 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8595 {
   8596 	struct wm_softc *sc = rxq->rxq_sc;
   8597 
   8598 	if (sc->sc_type == WM_T_82574)
   8599 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8600 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8601 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8602 	else
   8603 		return 0;
   8604 }
   8605 #endif /* WM_DEBUG */
   8606 
   8607 static inline bool
   8608 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8609     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8610 {
   8611 
   8612 	if (sc->sc_type == WM_T_82574)
   8613 		return (status & ext_bit) != 0;
   8614 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8615 		return (status & nq_bit) != 0;
   8616 	else
   8617 		return (status & legacy_bit) != 0;
   8618 }
   8619 
   8620 static inline bool
   8621 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8622     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8623 {
   8624 
   8625 	if (sc->sc_type == WM_T_82574)
   8626 		return (error & ext_bit) != 0;
   8627 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8628 		return (error & nq_bit) != 0;
   8629 	else
   8630 		return (error & legacy_bit) != 0;
   8631 }
   8632 
   8633 static inline bool
   8634 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8635 {
   8636 
   8637 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8638 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8639 		return true;
   8640 	else
   8641 		return false;
   8642 }
   8643 
   8644 static inline bool
   8645 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8646 {
   8647 	struct wm_softc *sc = rxq->rxq_sc;
   8648 
   8649 	/* XXXX missing error bit for newqueue? */
   8650 	if (wm_rxdesc_is_set_error(sc, errors,
   8651 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8652 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8653 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8654 		NQRXC_ERROR_RXE)) {
   8655 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8656 		    EXTRXC_ERROR_SE, 0))
   8657 			log(LOG_WARNING, "%s: symbol error\n",
   8658 			    device_xname(sc->sc_dev));
   8659 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8660 		    EXTRXC_ERROR_SEQ, 0))
   8661 			log(LOG_WARNING, "%s: receive sequence error\n",
   8662 			    device_xname(sc->sc_dev));
   8663 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8664 		    EXTRXC_ERROR_CE, 0))
   8665 			log(LOG_WARNING, "%s: CRC error\n",
   8666 			    device_xname(sc->sc_dev));
   8667 		return true;
   8668 	}
   8669 
   8670 	return false;
   8671 }
   8672 
   8673 static inline bool
   8674 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8675 {
   8676 	struct wm_softc *sc = rxq->rxq_sc;
   8677 
   8678 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8679 		NQRXC_STATUS_DD)) {
   8680 		/* We have processed all of the receive descriptors. */
   8681 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8682 		return false;
   8683 	}
   8684 
   8685 	return true;
   8686 }
   8687 
   8688 static inline bool
   8689 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8690     uint16_t vlantag, struct mbuf *m)
   8691 {
   8692 
   8693 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8694 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8695 		vlan_set_tag(m, le16toh(vlantag));
   8696 	}
   8697 
   8698 	return true;
   8699 }
   8700 
   8701 static inline void
   8702 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8703     uint32_t errors, struct mbuf *m)
   8704 {
   8705 	struct wm_softc *sc = rxq->rxq_sc;
   8706 
   8707 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8708 		if (wm_rxdesc_is_set_status(sc, status,
   8709 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8710 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8711 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8712 			if (wm_rxdesc_is_set_error(sc, errors,
   8713 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8714 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8715 		}
   8716 		if (wm_rxdesc_is_set_status(sc, status,
   8717 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8718 			/*
   8719 			 * Note: we don't know if this was TCP or UDP,
   8720 			 * so we just set both bits, and expect the
   8721 			 * upper layers to deal.
   8722 			 */
   8723 			WM_Q_EVCNT_INCR(rxq, tusum);
   8724 			m->m_pkthdr.csum_flags |=
   8725 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8726 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8727 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8728 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8729 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8730 		}
   8731 	}
   8732 }
   8733 
   8734 /*
   8735  * wm_rxeof:
   8736  *
   8737  *	Helper; handle receive interrupts.
   8738  */
   8739 static bool
   8740 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8741 {
   8742 	struct wm_softc *sc = rxq->rxq_sc;
   8743 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8744 	struct wm_rxsoft *rxs;
   8745 	struct mbuf *m;
   8746 	int i, len;
   8747 	int count = 0;
   8748 	uint32_t status, errors;
   8749 	uint16_t vlantag;
   8750 	bool more = false;
   8751 
   8752 	KASSERT(mutex_owned(rxq->rxq_lock));
   8753 
   8754 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8755 		if (limit-- == 0) {
   8756 			rxq->rxq_ptr = i;
   8757 			more = true;
   8758 			DPRINTF(WM_DEBUG_RX,
   8759 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8760 				device_xname(sc->sc_dev), i));
   8761 			break;
   8762 		}
   8763 
   8764 		rxs = &rxq->rxq_soft[i];
   8765 
   8766 		DPRINTF(WM_DEBUG_RX,
   8767 		    ("%s: RX: checking descriptor %d\n",
   8768 			device_xname(sc->sc_dev), i));
   8769 		wm_cdrxsync(rxq, i,
   8770 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8771 
   8772 		status = wm_rxdesc_get_status(rxq, i);
   8773 		errors = wm_rxdesc_get_errors(rxq, i);
   8774 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8775 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8776 #ifdef WM_DEBUG
   8777 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8778 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8779 #endif
   8780 
   8781 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8782 			/*
    8783 			 * Update the receive pointer while holding
    8784 			 * rxq_lock, consistent with the counters.
    8785 			 */
   8786 			rxq->rxq_ptr = i;
   8787 			break;
   8788 		}
   8789 
   8790 		count++;
   8791 		if (__predict_false(rxq->rxq_discard)) {
   8792 			DPRINTF(WM_DEBUG_RX,
   8793 			    ("%s: RX: discarding contents of descriptor %d\n",
   8794 				device_xname(sc->sc_dev), i));
   8795 			wm_init_rxdesc(rxq, i);
   8796 			if (wm_rxdesc_is_eop(rxq, status)) {
   8797 				/* Reset our state. */
   8798 				DPRINTF(WM_DEBUG_RX,
   8799 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8800 					device_xname(sc->sc_dev)));
   8801 				rxq->rxq_discard = 0;
   8802 			}
   8803 			continue;
   8804 		}
   8805 
   8806 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8807 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8808 
   8809 		m = rxs->rxs_mbuf;
   8810 
   8811 		/*
   8812 		 * Add a new receive buffer to the ring, unless of
   8813 		 * course the length is zero. Treat the latter as a
   8814 		 * failed mapping.
   8815 		 */
   8816 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8817 			/*
   8818 			 * Failed, throw away what we've done so
   8819 			 * far, and discard the rest of the packet.
   8820 			 */
   8821 			ifp->if_ierrors++;
   8822 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8823 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8824 			wm_init_rxdesc(rxq, i);
   8825 			if (!wm_rxdesc_is_eop(rxq, status))
   8826 				rxq->rxq_discard = 1;
   8827 			if (rxq->rxq_head != NULL)
   8828 				m_freem(rxq->rxq_head);
   8829 			WM_RXCHAIN_RESET(rxq);
   8830 			DPRINTF(WM_DEBUG_RX,
   8831 			    ("%s: RX: Rx buffer allocation failed, "
   8832 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8833 				rxq->rxq_discard ? " (discard)" : ""));
   8834 			continue;
   8835 		}
   8836 
   8837 		m->m_len = len;
   8838 		rxq->rxq_len += len;
   8839 		DPRINTF(WM_DEBUG_RX,
   8840 		    ("%s: RX: buffer at %p len %d\n",
   8841 			device_xname(sc->sc_dev), m->m_data, len));
   8842 
   8843 		/* If this is not the end of the packet, keep looking. */
   8844 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8845 			WM_RXCHAIN_LINK(rxq, m);
   8846 			DPRINTF(WM_DEBUG_RX,
   8847 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8848 				device_xname(sc->sc_dev), rxq->rxq_len));
   8849 			continue;
   8850 		}
   8851 
    8852 		/*
    8853 		 * Okay, we have the entire packet now. The chip is
    8854 		 * configured to include the FCS except on the I350, I354
    8855 		 * and I21[01] (not all chips can be configured to strip
    8856 		 * it), so we need to trim it. We may also need to adjust
    8857 		 * the length of the previous mbuf in the chain if the
    8858 		 * current mbuf is too short. Due to an erratum, the
    8859 		 * RCTL_SECRC bit in the RCTL register is always set on
    8860 		 * the I350, so we don't trim the FCS there.
    8861 		 */
   8862 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8863 		    && (sc->sc_type != WM_T_I210)
   8864 		    && (sc->sc_type != WM_T_I211)) {
   8865 			if (m->m_len < ETHER_CRC_LEN) {
   8866 				rxq->rxq_tail->m_len
   8867 				    -= (ETHER_CRC_LEN - m->m_len);
   8868 				m->m_len = 0;
   8869 			} else
   8870 				m->m_len -= ETHER_CRC_LEN;
   8871 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8872 		} else
   8873 			len = rxq->rxq_len;
   8874 
   8875 		WM_RXCHAIN_LINK(rxq, m);
   8876 
   8877 		*rxq->rxq_tailp = NULL;
   8878 		m = rxq->rxq_head;
   8879 
   8880 		WM_RXCHAIN_RESET(rxq);
   8881 
   8882 		DPRINTF(WM_DEBUG_RX,
   8883 		    ("%s: RX: have entire packet, len -> %d\n",
   8884 			device_xname(sc->sc_dev), len));
   8885 
   8886 		/* If an error occurred, update stats and drop the packet. */
   8887 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8888 			m_freem(m);
   8889 			continue;
   8890 		}
   8891 
   8892 		/* No errors.  Receive the packet. */
   8893 		m_set_rcvif(m, ifp);
   8894 		m->m_pkthdr.len = len;
    8895 		/*
    8896 		 * TODO
    8897 		 * We should save the RSS hash and RSS type in this mbuf.
    8898 		 */
   8899 		DPRINTF(WM_DEBUG_RX,
   8900 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8901 			device_xname(sc->sc_dev), rsstype, rsshash));
   8902 
   8903 		/*
   8904 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8905 		 * for us.  Associate the tag with the packet.
   8906 		 */
   8907 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8908 			continue;
   8909 
   8910 		/* Set up checksum info for this packet. */
   8911 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8912 		/*
    8913 		 * Update the receive pointer while holding rxq_lock,
    8914 		 * consistent with the counters.
    8915 		 */
   8916 		rxq->rxq_ptr = i;
   8917 		rxq->rxq_packets++;
   8918 		rxq->rxq_bytes += len;
   8919 		mutex_exit(rxq->rxq_lock);
   8920 
   8921 		/* Pass it on. */
   8922 		if_percpuq_enqueue(sc->sc_ipq, m);
   8923 
   8924 		mutex_enter(rxq->rxq_lock);
   8925 
   8926 		if (rxq->rxq_stopping)
   8927 			break;
   8928 	}
   8929 
   8930 	if (count != 0)
   8931 		rnd_add_uint32(&sc->rnd_source, count);
   8932 
   8933 	DPRINTF(WM_DEBUG_RX,
   8934 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8935 
   8936 	return more;
   8937 }
   8938 
   8939 /*
   8940  * wm_linkintr_gmii:
   8941  *
   8942  *	Helper; handle link interrupts for GMII.
   8943  */
   8944 static void
   8945 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8946 {
   8947 
   8948 	KASSERT(WM_CORE_LOCKED(sc));
   8949 
   8950 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8951 		__func__));
   8952 
   8953 	if (icr & ICR_LSC) {
   8954 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8955 		uint32_t reg;
   8956 		bool link;
   8957 
   8958 		link = status & STATUS_LU;
   8959 		if (link) {
   8960 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8961 				device_xname(sc->sc_dev),
   8962 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8963 		} else {
   8964 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8965 				device_xname(sc->sc_dev)));
   8966 		}
   8967 		if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8968 			wm_gig_downshift_workaround_ich8lan(sc);
   8969 
   8970 		if ((sc->sc_type == WM_T_ICH8)
   8971 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8972 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8973 		}
   8974 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8975 			device_xname(sc->sc_dev)));
   8976 		mii_pollstat(&sc->sc_mii);
   8977 		if (sc->sc_type == WM_T_82543) {
   8978 			int miistatus, active;
   8979 
   8980 			/*
   8981 			 * With 82543, we need to force speed and
   8982 			 * duplex on the MAC equal to what the PHY
   8983 			 * speed and duplex configuration is.
   8984 			 */
   8985 			miistatus = sc->sc_mii.mii_media_status;
   8986 
   8987 			if (miistatus & IFM_ACTIVE) {
   8988 				active = sc->sc_mii.mii_media_active;
   8989 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8990 				switch (IFM_SUBTYPE(active)) {
   8991 				case IFM_10_T:
   8992 					sc->sc_ctrl |= CTRL_SPEED_10;
   8993 					break;
   8994 				case IFM_100_TX:
   8995 					sc->sc_ctrl |= CTRL_SPEED_100;
   8996 					break;
   8997 				case IFM_1000_T:
   8998 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8999 					break;
   9000 				default:
    9001 					/*
    9002 					 * Fiber?
    9003 					 * Should not enter here.
    9004 					 */
   9005 					printf("unknown media (%x)\n", active);
   9006 					break;
   9007 				}
   9008 				if (active & IFM_FDX)
   9009 					sc->sc_ctrl |= CTRL_FD;
   9010 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9011 			}
   9012 		} else if (sc->sc_type == WM_T_PCH) {
   9013 			wm_k1_gig_workaround_hv(sc,
   9014 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9015 		}
   9016 
   9017 		/*
   9018 		 * I217 Packet Loss issue:
   9019 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   9020 		 * on power up.
   9021 		 * Set the Beacon Duration for I217 to 8 usec
   9022 		 */
   9023 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9024 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   9025 			reg &= ~FEXTNVM4_BEACON_DURATION;
   9026 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   9027 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   9028 		}
   9029 
   9030 		/* Work-around I218 hang issue */
   9031 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   9032 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   9033 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   9034 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   9035 			wm_k1_workaround_lpt_lp(sc, link);
   9036 
   9037 		if (sc->sc_type >= WM_T_PCH_LPT) {
   9038 			/*
   9039 			 * Set platform power management values for Latency
   9040 			 * Tolerance Reporting (LTR)
   9041 			 */
   9042 			wm_platform_pm_pch_lpt(sc,
   9043 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   9044 		}
   9045 
   9046 		/* Clear link partner's EEE ability */
   9047 		sc->eee_lp_ability = 0;
   9048 
   9049 		/* FEXTNVM6 K1-off workaround */
   9050 		if (sc->sc_type == WM_T_PCH_SPT) {
   9051 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   9052 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   9053 			    & FEXTNVM6_K1_OFF_ENABLE)
   9054 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   9055 			else
   9056 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   9057 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   9058 		}
   9059 
   9060 		if (!link)
   9061 			return;
   9062 
   9063 		switch (sc->sc_type) {
   9064 		case WM_T_PCH2:
   9065 			wm_k1_workaround_lv(sc);
   9066 			/* FALLTHROUGH */
   9067 		case WM_T_PCH:
   9068 			if (sc->sc_phytype == WMPHY_82578)
   9069 				wm_link_stall_workaround_hv(sc);
   9070 			break;
   9071 		default:
   9072 			break;
   9073 		}
   9074 
   9075 		/* Enable/Disable EEE after link up */
   9076 		if (sc->sc_phytype > WMPHY_82579)
   9077 			wm_set_eee_pchlan(sc);
   9078 
   9079 	} else if (icr & ICR_RXSEQ) {
   9080 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   9081 			device_xname(sc->sc_dev)));
   9082 	}
   9083 }
   9084 
   9085 /*
   9086  * wm_linkintr_tbi:
   9087  *
   9088  *	Helper; handle link interrupts for TBI mode.
   9089  */
   9090 static void
   9091 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   9092 {
   9093 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9094 	uint32_t status;
   9095 
   9096 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9097 		__func__));
   9098 
   9099 	status = CSR_READ(sc, WMREG_STATUS);
   9100 	if (icr & ICR_LSC) {
   9101 		wm_check_for_link(sc);
   9102 		if (status & STATUS_LU) {
   9103 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   9104 				device_xname(sc->sc_dev),
   9105 				(status & STATUS_FD) ? "FDX" : "HDX"));
   9106 			/*
   9107 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9108 			 * so we should update sc->sc_ctrl
   9109 			 */
   9110 
   9111 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9112 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9113 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9114 			if (status & STATUS_FD)
   9115 				sc->sc_tctl |=
   9116 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9117 			else
   9118 				sc->sc_tctl |=
   9119 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9120 			if (sc->sc_ctrl & CTRL_TFCE)
   9121 				sc->sc_fcrtl |= FCRTL_XONE;
   9122 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9123 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9124 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   9125 			sc->sc_tbi_linkup = 1;
   9126 			if_link_state_change(ifp, LINK_STATE_UP);
   9127 		} else {
   9128 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9129 				device_xname(sc->sc_dev)));
   9130 			sc->sc_tbi_linkup = 0;
   9131 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9132 		}
   9133 		/* Update LED */
   9134 		wm_tbi_serdes_set_linkled(sc);
   9135 	} else if (icr & ICR_RXSEQ) {
   9136 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9137 			device_xname(sc->sc_dev)));
   9138 	}
   9139 }
   9140 
   9141 /*
   9142  * wm_linkintr_serdes:
   9143  *
    9144  *	Helper; handle link interrupts for SERDES mode.
   9145  */
   9146 static void
   9147 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   9148 {
   9149 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9150 	struct mii_data *mii = &sc->sc_mii;
   9151 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9152 	uint32_t pcs_adv, pcs_lpab, reg;
   9153 
   9154 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9155 		__func__));
   9156 
   9157 	if (icr & ICR_LSC) {
   9158 		/* Check PCS */
   9159 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9160 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9161 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9162 				device_xname(sc->sc_dev)));
   9163 			mii->mii_media_status |= IFM_ACTIVE;
   9164 			sc->sc_tbi_linkup = 1;
   9165 			if_link_state_change(ifp, LINK_STATE_UP);
   9166 		} else {
   9167 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9168 				device_xname(sc->sc_dev)));
   9169 			mii->mii_media_status |= IFM_NONE;
   9170 			sc->sc_tbi_linkup = 0;
   9171 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9172 			wm_tbi_serdes_set_linkled(sc);
   9173 			return;
   9174 		}
   9175 		mii->mii_media_active |= IFM_1000_SX;
   9176 		if ((reg & PCS_LSTS_FDX) != 0)
   9177 			mii->mii_media_active |= IFM_FDX;
   9178 		else
   9179 			mii->mii_media_active |= IFM_HDX;
   9180 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9181 			/* Check flow */
   9182 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9183 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9184 				DPRINTF(WM_DEBUG_LINK,
   9185 				    ("XXX LINKOK but not ACOMP\n"));
   9186 				return;
   9187 			}
   9188 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9189 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9190 			DPRINTF(WM_DEBUG_LINK,
   9191 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9192 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9193 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9194 				mii->mii_media_active |= IFM_FLOW
   9195 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9196 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9197 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9198 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9199 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9200 				mii->mii_media_active |= IFM_FLOW
   9201 				    | IFM_ETH_TXPAUSE;
   9202 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9203 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9204 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9205 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9206 				mii->mii_media_active |= IFM_FLOW
   9207 				    | IFM_ETH_RXPAUSE;
   9208 		}
   9209 		/* Update LED */
   9210 		wm_tbi_serdes_set_linkled(sc);
   9211 	} else {
   9212 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9213 		    device_xname(sc->sc_dev)));
   9214 	}
   9215 }
   9216 
   9217 /*
   9218  * wm_linkintr:
   9219  *
   9220  *	Helper; handle link interrupts.
   9221  */
   9222 static void
   9223 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9224 {
   9225 
   9226 	KASSERT(WM_CORE_LOCKED(sc));
   9227 
   9228 	if (sc->sc_flags & WM_F_HAS_MII)
   9229 		wm_linkintr_gmii(sc, icr);
   9230 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9231 	    && (sc->sc_type >= WM_T_82575))
   9232 		wm_linkintr_serdes(sc, icr);
   9233 	else
   9234 		wm_linkintr_tbi(sc, icr);
   9235 }
   9236 
   9237 /*
   9238  * wm_intr_legacy:
   9239  *
   9240  *	Interrupt service routine for INTx and MSI.
   9241  */
   9242 static int
   9243 wm_intr_legacy(void *arg)
   9244 {
   9245 	struct wm_softc *sc = arg;
   9246 	struct wm_queue *wmq = &sc->sc_queue[0];
   9247 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9248 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9249 	uint32_t icr, rndval = 0;
   9250 	int handled = 0;
   9251 
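         	/*
         	 * Reading ICR clears the asserted interrupt causes, so keep
         	 * reading until no cause we care about remains set.
         	 */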
   9252 	while (1 /* CONSTCOND */) {
   9253 		icr = CSR_READ(sc, WMREG_ICR);
   9254 		if ((icr & sc->sc_icr) == 0)
   9255 			break;
   9256 		if (handled == 0) {
   9257 			DPRINTF(WM_DEBUG_TX,
    9258 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9259 		}
   9260 		if (rndval == 0)
   9261 			rndval = icr;
   9262 
   9263 		mutex_enter(rxq->rxq_lock);
   9264 
   9265 		if (rxq->rxq_stopping) {
   9266 			mutex_exit(rxq->rxq_lock);
   9267 			break;
   9268 		}
   9269 
   9270 		handled = 1;
   9271 
   9272 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9273 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9274 			DPRINTF(WM_DEBUG_RX,
   9275 			    ("%s: RX: got Rx intr 0x%08x\n",
   9276 				device_xname(sc->sc_dev),
   9277 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9278 			WM_Q_EVCNT_INCR(rxq, intr);
   9279 		}
   9280 #endif
    9281 		/*
    9282 		 * wm_rxeof() does *not* call upper layer functions directly,
    9283 		 * as if_percpuq_enqueue() just calls softint_schedule().
    9284 		 * So we can call wm_rxeof() in interrupt context.
    9285 		 */
   9286 		wm_rxeof(rxq, UINT_MAX);
   9287 
   9288 		mutex_exit(rxq->rxq_lock);
   9289 		mutex_enter(txq->txq_lock);
   9290 
   9291 		if (txq->txq_stopping) {
   9292 			mutex_exit(txq->txq_lock);
   9293 			break;
   9294 		}
   9295 
   9296 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9297 		if (icr & ICR_TXDW) {
   9298 			DPRINTF(WM_DEBUG_TX,
   9299 			    ("%s: TX: got TXDW interrupt\n",
   9300 				device_xname(sc->sc_dev)));
   9301 			WM_Q_EVCNT_INCR(txq, txdw);
   9302 		}
   9303 #endif
   9304 		wm_txeof(txq, UINT_MAX);
   9305 
   9306 		mutex_exit(txq->txq_lock);
   9307 		WM_CORE_LOCK(sc);
   9308 
   9309 		if (sc->sc_core_stopping) {
   9310 			WM_CORE_UNLOCK(sc);
   9311 			break;
   9312 		}
   9313 
   9314 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9315 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9316 			wm_linkintr(sc, icr);
   9317 		}
   9318 
   9319 		WM_CORE_UNLOCK(sc);
   9320 
   9321 		if (icr & ICR_RXO) {
   9322 #if defined(WM_DEBUG)
   9323 			log(LOG_WARNING, "%s: Receive overrun\n",
   9324 			    device_xname(sc->sc_dev));
   9325 #endif /* defined(WM_DEBUG) */
   9326 		}
   9327 	}
   9328 
   9329 	rnd_add_uint32(&sc->rnd_source, rndval);
   9330 
   9331 	if (handled) {
   9332 		/* Try to get more packets going. */
   9333 		softint_schedule(wmq->wmq_si);
   9334 	}
   9335 
   9336 	return handled;
   9337 }
   9338 
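         /*
          * Mask this queue's Tx/Rx interrupt: the 82574 uses per-queue bits
          * in IMC, the 82575 uses EITR_TX_QUEUE/EITR_RX_QUEUE bits in EIMC,
          * and newer devices mask the queue's MSI-X vector bit in EIMC.
          */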
   9339 static inline void
   9340 wm_txrxintr_disable(struct wm_queue *wmq)
   9341 {
   9342 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9343 
   9344 	if (sc->sc_type == WM_T_82574)
   9345 		CSR_WRITE(sc, WMREG_IMC,
   9346 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9347 	else if (sc->sc_type == WM_T_82575)
   9348 		CSR_WRITE(sc, WMREG_EIMC,
   9349 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9350 	else
   9351 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9352 }
   9353 
   9354 static inline void
   9355 wm_txrxintr_enable(struct wm_queue *wmq)
   9356 {
   9357 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9358 
   9359 	wm_itrs_calculate(sc, wmq);
   9360 
    9361 	/*
    9362 	 * ICR_OTHER, which was disabled in wm_linkintr_msix(), is enabled
    9363 	 * again here. It does not matter whether RXQ(0) or RXQ(1) enables
    9364 	 * ICR_OTHER first, because each RXQ/TXQ interrupt stays disabled
    9365 	 * while its wm_handle_queue(wmq) is running.
    9366 	 */
   9367 	if (sc->sc_type == WM_T_82574)
   9368 		CSR_WRITE(sc, WMREG_IMS,
   9369 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9370 	else if (sc->sc_type == WM_T_82575)
   9371 		CSR_WRITE(sc, WMREG_EIMS,
   9372 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9373 	else
   9374 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9375 }
   9376 
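         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for a Tx/Rx queue pair for MSI-X.
          *	Process a bounded amount of Tx and Rx work with the queue
          *	interrupt masked; if work remains, defer to the softint,
          *	otherwise re-enable the interrupt.
          */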
   9377 static int
   9378 wm_txrxintr_msix(void *arg)
   9379 {
   9380 	struct wm_queue *wmq = arg;
   9381 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9382 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9383 	struct wm_softc *sc = txq->txq_sc;
   9384 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9385 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9386 	bool txmore;
   9387 	bool rxmore;
   9388 
   9389 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9390 
   9391 	DPRINTF(WM_DEBUG_TX,
   9392 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9393 
   9394 	wm_txrxintr_disable(wmq);
   9395 
   9396 	mutex_enter(txq->txq_lock);
   9397 
   9398 	if (txq->txq_stopping) {
   9399 		mutex_exit(txq->txq_lock);
   9400 		return 0;
   9401 	}
   9402 
   9403 	WM_Q_EVCNT_INCR(txq, txdw);
   9404 	txmore = wm_txeof(txq, txlimit);
    9405 	/* wm_deferred_start_locked() is called from wm_handle_queue(). */
   9406 	mutex_exit(txq->txq_lock);
   9407 
   9408 	DPRINTF(WM_DEBUG_RX,
   9409 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9410 	mutex_enter(rxq->rxq_lock);
   9411 
   9412 	if (rxq->rxq_stopping) {
   9413 		mutex_exit(rxq->rxq_lock);
   9414 		return 0;
   9415 	}
   9416 
   9417 	WM_Q_EVCNT_INCR(rxq, intr);
   9418 	rxmore = wm_rxeof(rxq, rxlimit);
   9419 	mutex_exit(rxq->rxq_lock);
   9420 
   9421 	wm_itrs_writereg(sc, wmq);
   9422 
   9423 	if (txmore || rxmore)
   9424 		softint_schedule(wmq->wmq_si);
   9425 	else
   9426 		wm_txrxintr_enable(wmq);
   9427 
   9428 	return 1;
   9429 }
   9430 
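         /*
          * wm_handle_queue:
          *
          *	Softint handler for a Tx/Rx queue pair: run bounded wm_txeof()
          *	and wm_rxeof() passes, restart deferred transmission, and
          *	re-enable the queue interrupt once there is no more work.
          */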
   9431 static void
   9432 wm_handle_queue(void *arg)
   9433 {
   9434 	struct wm_queue *wmq = arg;
   9435 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9436 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9437 	struct wm_softc *sc = txq->txq_sc;
   9438 	u_int txlimit = sc->sc_tx_process_limit;
   9439 	u_int rxlimit = sc->sc_rx_process_limit;
   9440 	bool txmore;
   9441 	bool rxmore;
   9442 
   9443 	mutex_enter(txq->txq_lock);
   9444 	if (txq->txq_stopping) {
   9445 		mutex_exit(txq->txq_lock);
   9446 		return;
   9447 	}
   9448 	txmore = wm_txeof(txq, txlimit);
   9449 	wm_deferred_start_locked(txq);
   9450 	mutex_exit(txq->txq_lock);
   9451 
   9452 	mutex_enter(rxq->rxq_lock);
   9453 	if (rxq->rxq_stopping) {
   9454 		mutex_exit(rxq->rxq_lock);
   9455 		return;
   9456 	}
   9457 	WM_Q_EVCNT_INCR(rxq, defer);
   9458 	rxmore = wm_rxeof(rxq, rxlimit);
   9459 	mutex_exit(rxq->rxq_lock);
   9460 
   9461 	if (txmore || rxmore)
   9462 		softint_schedule(wmq->wmq_si);
   9463 	else
   9464 		wm_txrxintr_enable(wmq);
   9465 }
   9466 
   9467 /*
   9468  * wm_linkintr_msix:
   9469  *
   9470  *	Interrupt service routine for link status change for MSI-X.
   9471  */
   9472 static int
   9473 wm_linkintr_msix(void *arg)
   9474 {
   9475 	struct wm_softc *sc = arg;
   9476 	uint32_t reg;
   9477 	bool has_rxo;
   9478 
   9479 	DPRINTF(WM_DEBUG_LINK,
   9480 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9481 
   9482 	reg = CSR_READ(sc, WMREG_ICR);
   9483 	WM_CORE_LOCK(sc);
   9484 	if (sc->sc_core_stopping)
   9485 		goto out;
   9486 
   9487 	if ((reg & ICR_LSC) != 0) {
   9488 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9489 		wm_linkintr(sc, ICR_LSC);
   9490 	}
   9491 
    9492 	/*
    9493 	 * XXX 82574 MSI-X mode workaround
    9494 	 *
    9495 	 * In 82574 MSI-X mode, a receive overrun (RXO) is delivered on the
    9496 	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
    9497 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
    9498 	 * interrupts by writing WMREG_ICS, to process the received packets.
    9499 	 */
   9500 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9501 #if defined(WM_DEBUG)
   9502 		log(LOG_WARNING, "%s: Receive overrun\n",
   9503 		    device_xname(sc->sc_dev));
   9504 #endif /* defined(WM_DEBUG) */
   9505 
   9506 		has_rxo = true;
    9507 		/*
    9508 		 * The RXO interrupt fires at a very high rate when receive
    9509 		 * traffic is heavy, so we handle ICR_OTHER in polling mode,
    9510 		 * like the Tx/Rx interrupts. ICR_OTHER will be re-enabled
    9511 		 * at the end of wm_txrxintr_msix(), which is kicked by both
    9512 		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
    9513 		 */
   9514 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9515 
   9516 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9517 	}
    9518 
   9521 out:
   9522 	WM_CORE_UNLOCK(sc);
   9523 
   9524 	if (sc->sc_type == WM_T_82574) {
   9525 		if (!has_rxo)
   9526 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9527 		else
   9528 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9529 	} else if (sc->sc_type == WM_T_82575)
   9530 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9531 	else
   9532 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9533 
   9534 	return 1;
   9535 }
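
/*
 * Note the asymmetric re-arming above on the 82574: while an RXO storm
 * is being worked around, only ICR_LSC is re-enabled here and
 * ICR_OTHER stays masked; it is re-enabled at the end of
 * wm_txrxintr_msix() once the forced ICR_RXQ(0)/ICR_RXQ(1) processing
 * has run.
 */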
   9536 
   9537 /*
   9538  * Media related.
   9539  * GMII, SGMII, TBI (and SERDES)
   9540  */
   9541 
   9542 /* Common */
   9543 
   9544 /*
   9545  * wm_tbi_serdes_set_linkled:
   9546  *
   9547  *	Update the link LED on TBI and SERDES devices.
   9548  */
   9549 static void
   9550 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9551 {
   9552 
   9553 	if (sc->sc_tbi_linkup)
   9554 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9555 	else
   9556 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9557 
   9558 	/* 82540 or newer devices are active low */
   9559 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9560 
   9561 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9562 }
   9563 
   9564 /* GMII related */
   9565 
   9566 /*
   9567  * wm_gmii_reset:
   9568  *
   9569  *	Reset the PHY.
   9570  */
   9571 static void
   9572 wm_gmii_reset(struct wm_softc *sc)
   9573 {
   9574 	uint32_t reg;
   9575 	int rv;
   9576 
   9577 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9578 		device_xname(sc->sc_dev), __func__));
   9579 
   9580 	rv = sc->phy.acquire(sc);
   9581 	if (rv != 0) {
   9582 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9583 		    __func__);
   9584 		return;
   9585 	}
   9586 
   9587 	switch (sc->sc_type) {
   9588 	case WM_T_82542_2_0:
   9589 	case WM_T_82542_2_1:
   9590 		/* null */
   9591 		break;
   9592 	case WM_T_82543:
    9593 		/*
    9594 		 * On the 82543, we need to force the MAC's speed and duplex
    9595 		 * settings to match the PHY's configuration. In addition, we
    9596 		 * need to perform a hardware reset on the PHY to take it out
    9597 		 * of reset.
    9598 		 */
   9599 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9600 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9601 
   9602 		/* The PHY reset pin is active-low. */
   9603 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9604 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9605 		    CTRL_EXT_SWDPIN(4));
   9606 		reg |= CTRL_EXT_SWDPIO(4);
   9607 
   9608 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9609 		CSR_WRITE_FLUSH(sc);
   9610 		delay(10*1000);
   9611 
   9612 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9613 		CSR_WRITE_FLUSH(sc);
   9614 		delay(150);
   9615 #if 0
   9616 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9617 #endif
   9618 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9619 		break;
   9620 	case WM_T_82544:	/* reset 10000us */
   9621 	case WM_T_82540:
   9622 	case WM_T_82545:
   9623 	case WM_T_82545_3:
   9624 	case WM_T_82546:
   9625 	case WM_T_82546_3:
   9626 	case WM_T_82541:
   9627 	case WM_T_82541_2:
   9628 	case WM_T_82547:
   9629 	case WM_T_82547_2:
   9630 	case WM_T_82571:	/* reset 100us */
   9631 	case WM_T_82572:
   9632 	case WM_T_82573:
   9633 	case WM_T_82574:
   9634 	case WM_T_82575:
   9635 	case WM_T_82576:
   9636 	case WM_T_82580:
   9637 	case WM_T_I350:
   9638 	case WM_T_I354:
   9639 	case WM_T_I210:
   9640 	case WM_T_I211:
   9641 	case WM_T_82583:
   9642 	case WM_T_80003:
   9643 		/* generic reset */
   9644 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9645 		CSR_WRITE_FLUSH(sc);
   9646 		delay(20000);
   9647 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9648 		CSR_WRITE_FLUSH(sc);
   9649 		delay(20000);
   9650 
   9651 		if ((sc->sc_type == WM_T_82541)
   9652 		    || (sc->sc_type == WM_T_82541_2)
   9653 		    || (sc->sc_type == WM_T_82547)
   9654 		    || (sc->sc_type == WM_T_82547_2)) {
    9655 			/* Workarounds for IGP are done in igp_reset(). */
   9656 			/* XXX add code to set LED after phy reset */
   9657 		}
   9658 		break;
   9659 	case WM_T_ICH8:
   9660 	case WM_T_ICH9:
   9661 	case WM_T_ICH10:
   9662 	case WM_T_PCH:
   9663 	case WM_T_PCH2:
   9664 	case WM_T_PCH_LPT:
   9665 	case WM_T_PCH_SPT:
   9666 	case WM_T_PCH_CNP:
   9667 		/* generic reset */
   9668 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9669 		CSR_WRITE_FLUSH(sc);
   9670 		delay(100);
   9671 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9672 		CSR_WRITE_FLUSH(sc);
   9673 		delay(150);
   9674 		break;
   9675 	default:
   9676 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9677 		    __func__);
   9678 		break;
   9679 	}
   9680 
   9681 	sc->phy.release(sc);
   9682 
   9683 	/* get_cfg_done */
   9684 	wm_get_cfg_done(sc);
   9685 
   9686 	/* extra setup */
   9687 	switch (sc->sc_type) {
   9688 	case WM_T_82542_2_0:
   9689 	case WM_T_82542_2_1:
   9690 	case WM_T_82543:
   9691 	case WM_T_82544:
   9692 	case WM_T_82540:
   9693 	case WM_T_82545:
   9694 	case WM_T_82545_3:
   9695 	case WM_T_82546:
   9696 	case WM_T_82546_3:
   9697 	case WM_T_82541_2:
   9698 	case WM_T_82547_2:
   9699 	case WM_T_82571:
   9700 	case WM_T_82572:
   9701 	case WM_T_82573:
   9702 	case WM_T_82574:
   9703 	case WM_T_82583:
   9704 	case WM_T_82575:
   9705 	case WM_T_82576:
   9706 	case WM_T_82580:
   9707 	case WM_T_I350:
   9708 	case WM_T_I354:
   9709 	case WM_T_I210:
   9710 	case WM_T_I211:
   9711 	case WM_T_80003:
   9712 		/* null */
   9713 		break;
   9714 	case WM_T_82541:
   9715 	case WM_T_82547:
    9716 		/* XXX Actively configure the LED after PHY reset */
   9717 		break;
   9718 	case WM_T_ICH8:
   9719 	case WM_T_ICH9:
   9720 	case WM_T_ICH10:
   9721 	case WM_T_PCH:
   9722 	case WM_T_PCH2:
   9723 	case WM_T_PCH_LPT:
   9724 	case WM_T_PCH_SPT:
   9725 	case WM_T_PCH_CNP:
   9726 		wm_phy_post_reset(sc);
   9727 		break;
   9728 	default:
   9729 		panic("%s: unknown type\n", __func__);
   9730 		break;
   9731 	}
   9732 }
   9733 
    9734 /*
    9735  * Setup sc_phytype and mii_{read|write}reg.
    9736  *
    9737  *  To identify the PHY type, the correct read/write functions must be
    9738  * selected, and selecting them requires the PCI ID or MAC type, without
    9739  * accessing PHY registers.
    9740  *
    9741  *  On the first call of this function, the PHY ID is not known yet.
    9742  * Check the PCI ID or MAC type. The list of PCI IDs may not be complete,
    9743  * so the result might be incorrect.
    9744  *
    9745  *  On the second call, the PHY OUI and model are used to identify the
    9746  * PHY type. It might still not be perfect because of missing entries in
    9747  * the comparison table, but it should be better than the first call.
    9748  *
    9749  *  If the newly detected result differs from the previous assumption,
    9750  * a diagnostic message is printed.
    9751  */
   9752 static void
   9753 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9754     uint16_t phy_model)
   9755 {
   9756 	device_t dev = sc->sc_dev;
   9757 	struct mii_data *mii = &sc->sc_mii;
   9758 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9759 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9760 	mii_readreg_t new_readreg;
   9761 	mii_writereg_t new_writereg;
   9762 
   9763 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9764 		device_xname(sc->sc_dev), __func__));
   9765 
   9766 	if (mii->mii_readreg == NULL) {
   9767 		/*
   9768 		 *  This is the first call of this function. For ICH and PCH
   9769 		 * variants, it's difficult to determine the PHY access method
   9770 		 * by sc_type, so use the PCI product ID for some devices.
   9771 		 */
   9772 
   9773 		switch (sc->sc_pcidevid) {
   9774 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9775 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9776 			/* 82577 */
   9777 			new_phytype = WMPHY_82577;
   9778 			break;
   9779 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9780 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9781 			/* 82578 */
   9782 			new_phytype = WMPHY_82578;
   9783 			break;
   9784 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9785 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9786 			/* 82579 */
   9787 			new_phytype = WMPHY_82579;
   9788 			break;
   9789 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9790 		case PCI_PRODUCT_INTEL_82801I_BM:
   9791 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9792 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9793 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9794 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9795 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9796 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9797 			/* ICH8, 9, 10 with 82567 */
   9798 			new_phytype = WMPHY_BM;
   9799 			break;
   9800 		default:
   9801 			break;
   9802 		}
   9803 	} else {
   9804 		/* It's not the first call. Use PHY OUI and model */
   9805 		switch (phy_oui) {
   9806 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9807 			switch (phy_model) {
   9808 			case 0x0004: /* XXX */
   9809 				new_phytype = WMPHY_82578;
   9810 				break;
   9811 			default:
   9812 				break;
   9813 			}
   9814 			break;
   9815 		case MII_OUI_xxMARVELL:
   9816 			switch (phy_model) {
   9817 			case MII_MODEL_xxMARVELL_I210:
   9818 				new_phytype = WMPHY_I210;
   9819 				break;
   9820 			case MII_MODEL_xxMARVELL_E1011:
   9821 			case MII_MODEL_xxMARVELL_E1000_3:
   9822 			case MII_MODEL_xxMARVELL_E1000_5:
   9823 			case MII_MODEL_xxMARVELL_E1112:
   9824 				new_phytype = WMPHY_M88;
   9825 				break;
   9826 			case MII_MODEL_xxMARVELL_E1149:
   9827 				new_phytype = WMPHY_BM;
   9828 				break;
   9829 			case MII_MODEL_xxMARVELL_E1111:
   9830 			case MII_MODEL_xxMARVELL_I347:
   9831 			case MII_MODEL_xxMARVELL_E1512:
   9832 			case MII_MODEL_xxMARVELL_E1340M:
   9833 			case MII_MODEL_xxMARVELL_E1543:
   9834 				new_phytype = WMPHY_M88;
   9835 				break;
   9836 			case MII_MODEL_xxMARVELL_I82563:
   9837 				new_phytype = WMPHY_GG82563;
   9838 				break;
   9839 			default:
   9840 				break;
   9841 			}
   9842 			break;
   9843 		case MII_OUI_INTEL:
   9844 			switch (phy_model) {
   9845 			case MII_MODEL_INTEL_I82577:
   9846 				new_phytype = WMPHY_82577;
   9847 				break;
   9848 			case MII_MODEL_INTEL_I82579:
   9849 				new_phytype = WMPHY_82579;
   9850 				break;
   9851 			case MII_MODEL_INTEL_I217:
   9852 				new_phytype = WMPHY_I217;
   9853 				break;
   9854 			case MII_MODEL_INTEL_I82580:
   9855 			case MII_MODEL_INTEL_I350:
   9856 				new_phytype = WMPHY_82580;
   9857 				break;
   9858 			default:
   9859 				break;
   9860 			}
   9861 			break;
   9862 		case MII_OUI_yyINTEL:
   9863 			switch (phy_model) {
   9864 			case MII_MODEL_yyINTEL_I82562G:
   9865 			case MII_MODEL_yyINTEL_I82562EM:
   9866 			case MII_MODEL_yyINTEL_I82562ET:
   9867 				new_phytype = WMPHY_IFE;
   9868 				break;
   9869 			case MII_MODEL_yyINTEL_IGP01E1000:
   9870 				new_phytype = WMPHY_IGP;
   9871 				break;
   9872 			case MII_MODEL_yyINTEL_I82566:
   9873 				new_phytype = WMPHY_IGP_3;
   9874 				break;
   9875 			default:
   9876 				break;
   9877 			}
   9878 			break;
   9879 		default:
   9880 			break;
   9881 		}
   9882 		if (new_phytype == WMPHY_UNKNOWN)
   9883 			aprint_verbose_dev(dev,
   9884 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   9885 			    __func__, phy_oui, phy_model);
   9886 
    9887 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9888 		    && (sc->sc_phytype != new_phytype)) {
    9889 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9890 			    "was incorrect. PHY type from PHY ID = %u\n",
    9891 			    sc->sc_phytype, new_phytype);
    9892 		}
   9893 	}
   9894 
   9895 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9896 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9897 		/* SGMII */
   9898 		new_readreg = wm_sgmii_readreg;
   9899 		new_writereg = wm_sgmii_writereg;
    9900 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9901 		/* BM2 (phyaddr == 1) */
   9902 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9903 		    && (new_phytype != WMPHY_BM)
   9904 		    && (new_phytype != WMPHY_UNKNOWN))
   9905 			doubt_phytype = new_phytype;
   9906 		new_phytype = WMPHY_BM;
   9907 		new_readreg = wm_gmii_bm_readreg;
   9908 		new_writereg = wm_gmii_bm_writereg;
   9909 	} else if (sc->sc_type >= WM_T_PCH) {
   9910 		/* All PCH* use _hv_ */
   9911 		new_readreg = wm_gmii_hv_readreg;
   9912 		new_writereg = wm_gmii_hv_writereg;
   9913 	} else if (sc->sc_type >= WM_T_ICH8) {
   9914 		/* non-82567 ICH8, 9 and 10 */
   9915 		new_readreg = wm_gmii_i82544_readreg;
   9916 		new_writereg = wm_gmii_i82544_writereg;
   9917 	} else if (sc->sc_type >= WM_T_80003) {
   9918 		/* 80003 */
   9919 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9920 		    && (new_phytype != WMPHY_GG82563)
   9921 		    && (new_phytype != WMPHY_UNKNOWN))
   9922 			doubt_phytype = new_phytype;
   9923 		new_phytype = WMPHY_GG82563;
   9924 		new_readreg = wm_gmii_i80003_readreg;
   9925 		new_writereg = wm_gmii_i80003_writereg;
   9926 	} else if (sc->sc_type >= WM_T_I210) {
   9927 		/* I210 and I211 */
   9928 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9929 		    && (new_phytype != WMPHY_I210)
   9930 		    && (new_phytype != WMPHY_UNKNOWN))
   9931 			doubt_phytype = new_phytype;
   9932 		new_phytype = WMPHY_I210;
   9933 		new_readreg = wm_gmii_gs40g_readreg;
   9934 		new_writereg = wm_gmii_gs40g_writereg;
   9935 	} else if (sc->sc_type >= WM_T_82580) {
   9936 		/* 82580, I350 and I354 */
   9937 		new_readreg = wm_gmii_82580_readreg;
   9938 		new_writereg = wm_gmii_82580_writereg;
   9939 	} else if (sc->sc_type >= WM_T_82544) {
    9940 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9941 		new_readreg = wm_gmii_i82544_readreg;
   9942 		new_writereg = wm_gmii_i82544_writereg;
   9943 	} else {
   9944 		new_readreg = wm_gmii_i82543_readreg;
   9945 		new_writereg = wm_gmii_i82543_writereg;
   9946 	}
   9947 
   9948 	if (new_phytype == WMPHY_BM) {
   9949 		/* All BM use _bm_ */
   9950 		new_readreg = wm_gmii_bm_readreg;
   9951 		new_writereg = wm_gmii_bm_writereg;
   9952 	}
   9953 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9954 		/* All PCH* use _hv_ */
   9955 		new_readreg = wm_gmii_hv_readreg;
   9956 		new_writereg = wm_gmii_hv_writereg;
   9957 	}
   9958 
   9959 	/* Diag output */
   9960 	if (doubt_phytype != WMPHY_UNKNOWN)
   9961 		aprint_error_dev(dev, "Assumed new PHY type was "
   9962 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9963 		    new_phytype);
    9964 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9965 	    && (sc->sc_phytype != new_phytype))
    9966 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9967 		    "was incorrect. New PHY type = %u\n",
    9968 		    sc->sc_phytype, new_phytype);
   9969 
   9970 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9971 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9972 
   9973 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9974 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9975 		    "function was incorrect.\n");
   9976 
   9977 	/* Update now */
   9978 	sc->sc_phytype = new_phytype;
   9979 	mii->mii_readreg = new_readreg;
   9980 	mii->mii_writereg = new_writereg;
   9981 	if (new_readreg == wm_gmii_hv_readreg) {
   9982 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   9983 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   9984 	} else if (new_readreg == wm_sgmii_readreg) {
   9985 		sc->phy.readreg_locked = wm_sgmii_readreg_locked;
   9986 		sc->phy.writereg_locked = wm_sgmii_writereg_locked;
   9987 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   9988 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   9989 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   9990 	}
   9991 }
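
/*
 * A minimal sketch of the two-call protocol described above (for
 * illustration only; the exact call sites live elsewhere in the
 * driver):
 *
 *	wm_gmii_setup_phytype(sc, 0, 0);
 *		(first call: mii->mii_readreg is still NULL, so the PHY
 *		 type is guessed from the PCI product ID or MAC type)
 *
 *	child = LIST_FIRST(&mii->mii_phys);
 *	wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
 *	    child->mii_mpd_model);
 *		(second call: refine using the PHY OUI and model, as
 *		 wm_gmii_mediainit() does once a PHY has attached)
 */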
   9992 
   9993 /*
   9994  * wm_get_phy_id_82575:
   9995  *
   9996  * Return PHY ID. Return -1 if it failed.
   9997  */
   9998 static int
   9999 wm_get_phy_id_82575(struct wm_softc *sc)
   10000 {
   10001 	uint32_t reg;
   10002 	int phyid = -1;
   10003 
   10004 	/* XXX */
   10005 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   10006 		return -1;
   10007 
   10008 	if (wm_sgmii_uses_mdio(sc)) {
   10009 		switch (sc->sc_type) {
   10010 		case WM_T_82575:
   10011 		case WM_T_82576:
   10012 			reg = CSR_READ(sc, WMREG_MDIC);
   10013 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   10014 			break;
   10015 		case WM_T_82580:
   10016 		case WM_T_I350:
   10017 		case WM_T_I354:
   10018 		case WM_T_I210:
   10019 		case WM_T_I211:
   10020 			reg = CSR_READ(sc, WMREG_MDICNFG);
   10021 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   10022 			break;
   10023 		default:
   10024 			return -1;
   10025 		}
   10026 	}
   10027 
   10028 	return phyid;
   10029 }
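
/*
 * On SGMII parts that reach their PHY over MDIO, the PHY address is
 * not probed but read back from the MAC: the 82575/82576 carry it in
 * the MDIC register, and the 82580 and later in MDICNFG.
 * wm_gmii_mediainit() below uses this ID, when valid, to attach just
 * that single address.
 */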
   10030 
   10031 
   10032 /*
   10033  * wm_gmii_mediainit:
   10034  *
   10035  *	Initialize media for use on 1000BASE-T devices.
   10036  */
   10037 static void
   10038 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   10039 {
   10040 	device_t dev = sc->sc_dev;
   10041 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10042 	struct mii_data *mii = &sc->sc_mii;
   10043 	uint32_t reg;
   10044 
   10045 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10046 		device_xname(sc->sc_dev), __func__));
   10047 
   10048 	/* We have GMII. */
   10049 	sc->sc_flags |= WM_F_HAS_MII;
   10050 
   10051 	if (sc->sc_type == WM_T_80003)
   10052 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10053 	else
   10054 		sc->sc_tipg = TIPG_1000T_DFLT;
   10055 
   10056 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   10057 	if ((sc->sc_type == WM_T_82580)
   10058 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   10059 	    || (sc->sc_type == WM_T_I211)) {
   10060 		reg = CSR_READ(sc, WMREG_PHPM);
   10061 		reg &= ~PHPM_GO_LINK_D;
   10062 		CSR_WRITE(sc, WMREG_PHPM, reg);
   10063 	}
   10064 
   10065 	/*
   10066 	 * Let the chip set speed/duplex on its own based on
   10067 	 * signals from the PHY.
   10068 	 * XXXbouyer - I'm not sure this is right for the 80003,
   10069 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   10070 	 */
   10071 	sc->sc_ctrl |= CTRL_SLU;
   10072 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10073 
   10074 	/* Initialize our media structures and probe the GMII. */
   10075 	mii->mii_ifp = ifp;
   10076 
   10077 	mii->mii_statchg = wm_gmii_statchg;
   10078 
   10079 	/* get PHY control from SMBus to PCIe */
   10080 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   10081 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   10082 	    || (sc->sc_type == WM_T_PCH_CNP))
   10083 		wm_init_phy_workarounds_pchlan(sc);
   10084 
   10085 	wm_gmii_reset(sc);
   10086 
   10087 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10088 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   10089 	    wm_gmii_mediastatus);
   10090 
   10091 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   10092 	    || (sc->sc_type == WM_T_82580)
   10093 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   10094 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   10095 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   10096 			/* Attach only one port */
   10097 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   10098 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10099 		} else {
   10100 			int i, id;
   10101 			uint32_t ctrl_ext;
   10102 
   10103 			id = wm_get_phy_id_82575(sc);
   10104 			if (id != -1) {
   10105 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   10106 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   10107 			}
   10108 			if ((id == -1)
   10109 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    10110 				/* Power on the SGMII PHY if it is disabled */
   10111 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10112 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   10113 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   10114 				CSR_WRITE_FLUSH(sc);
   10115 				delay(300*1000); /* XXX too long */
   10116 
    10117 				/* Try PHY addresses 1 through 7 */
   10118 				for (i = 1; i < 8; i++)
   10119 					mii_attach(sc->sc_dev, &sc->sc_mii,
   10120 					    0xffffffff, i, MII_OFFSET_ANY,
   10121 					    MIIF_DOPAUSE);
   10122 
    10123 				/* Restore the previous SFP cage power state */
   10124 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10125 			}
   10126 		}
   10127 	} else
   10128 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10129 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10130 
    10131 	/*
    10132 	 * If the MAC is PCH2, PCH_LPT, PCH_SPT or PCH_CNP and we failed to
    10133 	 * detect the MII PHY, call wm_set_mdio_slow_mode_hv() and retry.
    10134 	 */
   10135 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   10136 		|| (sc->sc_type == WM_T_PCH_SPT)
   10137 		|| (sc->sc_type == WM_T_PCH_CNP))
   10138 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   10139 		wm_set_mdio_slow_mode_hv(sc);
   10140 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10141 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10142 	}
   10143 
   10144 	/*
   10145 	 * (For ICH8 variants)
   10146 	 * If PHY detection failed, use BM's r/w function and retry.
   10147 	 */
   10148 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   10149 		/* if failed, retry with *_bm_* */
   10150 		aprint_verbose_dev(dev, "Assumed PHY access function "
   10151 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   10152 		    sc->sc_phytype);
   10153 		sc->sc_phytype = WMPHY_BM;
   10154 		mii->mii_readreg = wm_gmii_bm_readreg;
   10155 		mii->mii_writereg = wm_gmii_bm_writereg;
   10156 
   10157 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10158 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10159 	}
   10160 
   10161 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10162 		/* No PHY was found */
   10163 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10164 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10165 		sc->sc_phytype = WMPHY_NONE;
   10166 	} else {
   10167 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10168 
    10169 		/*
    10170 		 * A PHY was found. Check the PHY type again via the second
    10171 		 * call of wm_gmii_setup_phytype().
    10172 		 */
   10173 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10174 		    child->mii_mpd_model);
   10175 
   10176 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10177 	}
   10178 }
   10179 
   10180 /*
   10181  * wm_gmii_mediachange:	[ifmedia interface function]
   10182  *
   10183  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10184  */
   10185 static int
   10186 wm_gmii_mediachange(struct ifnet *ifp)
   10187 {
   10188 	struct wm_softc *sc = ifp->if_softc;
   10189 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10190 	int rc;
   10191 
   10192 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10193 		device_xname(sc->sc_dev), __func__));
   10194 	if ((ifp->if_flags & IFF_UP) == 0)
   10195 		return 0;
   10196 
   10197 	/* Disable D0 LPLU. */
   10198 	wm_lplu_d0_disable(sc);
   10199 
   10200 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10201 	sc->sc_ctrl |= CTRL_SLU;
   10202 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10203 	    || (sc->sc_type > WM_T_82543)) {
   10204 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10205 	} else {
   10206 		sc->sc_ctrl &= ~CTRL_ASDE;
   10207 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10208 		if (ife->ifm_media & IFM_FDX)
   10209 			sc->sc_ctrl |= CTRL_FD;
   10210 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10211 		case IFM_10_T:
   10212 			sc->sc_ctrl |= CTRL_SPEED_10;
   10213 			break;
   10214 		case IFM_100_TX:
   10215 			sc->sc_ctrl |= CTRL_SPEED_100;
   10216 			break;
   10217 		case IFM_1000_T:
   10218 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10219 			break;
   10220 		case IFM_NONE:
   10221 			/* There is no specific setting for IFM_NONE */
   10222 			break;
   10223 		default:
   10224 			panic("wm_gmii_mediachange: bad media 0x%x",
   10225 			    ife->ifm_media);
   10226 		}
   10227 	}
   10228 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10229 	CSR_WRITE_FLUSH(sc);
   10230 	if (sc->sc_type <= WM_T_82543)
   10231 		wm_gmii_reset(sc);
   10232 
   10233 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10234 		return 0;
   10235 	return rc;
   10236 }
   10237 
   10238 /*
   10239  * wm_gmii_mediastatus:	[ifmedia interface function]
   10240  *
   10241  *	Get the current interface media status on a 1000BASE-T device.
   10242  */
   10243 static void
   10244 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10245 {
   10246 	struct wm_softc *sc = ifp->if_softc;
   10247 
   10248 	ether_mediastatus(ifp, ifmr);
   10249 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10250 	    | sc->sc_flowflags;
   10251 }
   10252 
   10253 #define	MDI_IO		CTRL_SWDPIN(2)
   10254 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10255 #define	MDI_CLK		CTRL_SWDPIN(3)
   10256 
   10257 static void
   10258 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10259 {
   10260 	uint32_t i, v;
   10261 
   10262 	v = CSR_READ(sc, WMREG_CTRL);
   10263 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10264 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10265 
   10266 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10267 		if (data & i)
   10268 			v |= MDI_IO;
   10269 		else
   10270 			v &= ~MDI_IO;
   10271 		CSR_WRITE(sc, WMREG_CTRL, v);
   10272 		CSR_WRITE_FLUSH(sc);
   10273 		delay(10);
   10274 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10275 		CSR_WRITE_FLUSH(sc);
   10276 		delay(10);
   10277 		CSR_WRITE(sc, WMREG_CTRL, v);
   10278 		CSR_WRITE_FLUSH(sc);
   10279 		delay(10);
   10280 	}
   10281 }
   10282 
   10283 static uint32_t
   10284 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10285 {
   10286 	uint32_t v, i, data = 0;
   10287 
   10288 	v = CSR_READ(sc, WMREG_CTRL);
   10289 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10290 	v |= CTRL_SWDPIO(3);
   10291 
   10292 	CSR_WRITE(sc, WMREG_CTRL, v);
   10293 	CSR_WRITE_FLUSH(sc);
   10294 	delay(10);
   10295 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10296 	CSR_WRITE_FLUSH(sc);
   10297 	delay(10);
   10298 	CSR_WRITE(sc, WMREG_CTRL, v);
   10299 	CSR_WRITE_FLUSH(sc);
   10300 	delay(10);
   10301 
   10302 	for (i = 0; i < 16; i++) {
   10303 		data <<= 1;
   10304 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10305 		CSR_WRITE_FLUSH(sc);
   10306 		delay(10);
   10307 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10308 			data |= 1;
   10309 		CSR_WRITE(sc, WMREG_CTRL, v);
   10310 		CSR_WRITE_FLUSH(sc);
   10311 		delay(10);
   10312 	}
   10313 
   10314 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10315 	CSR_WRITE_FLUSH(sc);
   10316 	delay(10);
   10317 	CSR_WRITE(sc, WMREG_CTRL, v);
   10318 	CSR_WRITE_FLUSH(sc);
   10319 	delay(10);
   10320 
   10321 	return data;
   10322 }
   10323 
   10324 #undef MDI_IO
   10325 #undef MDI_DIR
   10326 #undef MDI_CLK
   10327 
   10328 /*
   10329  * wm_gmii_i82543_readreg:	[mii interface function]
   10330  *
   10331  *	Read a PHY register on the GMII (i82543 version).
   10332  */
   10333 static int
   10334 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   10335 {
   10336 	struct wm_softc *sc = device_private(dev);
   10337 	int rv;
   10338 
   10339 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10340 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10341 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10342 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   10343 
   10344 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   10345 		device_xname(dev), phy, reg, rv));
   10346 
   10347 	return rv;
   10348 }
   10349 
   10350 /*
   10351  * wm_gmii_i82543_writereg:	[mii interface function]
   10352  *
   10353  *	Write a PHY register on the GMII (i82543 version).
   10354  */
   10355 static void
   10356 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10357 {
   10358 	struct wm_softc *sc = device_private(dev);
   10359 
   10360 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10361 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10362 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10363 	    (MII_COMMAND_START << 30), 32);
   10364 }
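
/*
 * The accessors above bit-bang IEEE 802.3 clause 22 MDIO frames over
 * the software-definable pins. A read sends a 32-bit preamble of ones
 * and then the 14-bit header built as
 *	reg | (phy << 5) | (MII_COMMAND_READ << 10) |
 *	    (MII_COMMAND_START << 12)
 * (start [13:12], opcode [11:10], PHY address [9:5], register [4:0]),
 * after which wm_i82543_mii_recvbits() clocks in the turnaround and
 * the 16 data bits. A write sends the preamble and then one 32-bit
 * field: start [31:30], opcode [29:28], PHY address [27:23], register
 * [22:18], turnaround [17:16], data [15:0].
 */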
   10365 
   10366 /*
   10367  * wm_gmii_mdic_readreg:	[mii interface function]
   10368  *
   10369  *	Read a PHY register on the GMII.
   10370  */
   10371 static int
   10372 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10373 {
   10374 	struct wm_softc *sc = device_private(dev);
   10375 	uint32_t mdic = 0;
   10376 	int i, rv;
   10377 
   10378 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10379 	    && (reg > MII_ADDRMASK)) {
   10380 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10381 		    __func__, sc->sc_phytype, reg);
   10382 		reg &= MII_ADDRMASK;
   10383 	}
   10384 
   10385 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10386 	    MDIC_REGADD(reg));
   10387 
   10388 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10389 		delay(50);
   10390 		mdic = CSR_READ(sc, WMREG_MDIC);
   10391 		if (mdic & MDIC_READY)
   10392 			break;
   10393 	}
   10394 
   10395 	if ((mdic & MDIC_READY) == 0) {
   10396 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10397 		    device_xname(dev), phy, reg);
   10398 		return 0;
   10399 	} else if (mdic & MDIC_E) {
   10400 #if 0 /* This is normal if no PHY is present. */
   10401 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10402 		    device_xname(dev), phy, reg);
   10403 #endif
   10404 		return 0;
   10405 	} else {
   10406 		rv = MDIC_DATA(mdic);
   10407 		if (rv == 0xffff)
   10408 			rv = 0;
   10409 	}
   10410 
   10411 	/*
   10412 	 * Allow some time after each MDIC transaction to avoid
   10413 	 * reading duplicate data in the next MDIC transaction.
   10414 	 */
   10415 	if (sc->sc_type == WM_T_PCH2)
   10416 		delay(100);
   10417 
   10418 	return rv;
   10419 }
   10420 
   10421 /*
   10422  * wm_gmii_mdic_writereg:	[mii interface function]
   10423  *
   10424  *	Write a PHY register on the GMII.
   10425  */
   10426 static void
   10427 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10428 {
   10429 	struct wm_softc *sc = device_private(dev);
   10430 	uint32_t mdic = 0;
   10431 	int i;
   10432 
   10433 	if ((sc->sc_phytype != WMPHY_82579) && (sc->sc_phytype != WMPHY_I217)
   10434 	    && (reg > MII_ADDRMASK)) {
   10435 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10436 		    __func__, sc->sc_phytype, reg);
   10437 		reg &= MII_ADDRMASK;
   10438 	}
   10439 
   10440 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10441 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10442 
   10443 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10444 		delay(50);
   10445 		mdic = CSR_READ(sc, WMREG_MDIC);
   10446 		if (mdic & MDIC_READY)
   10447 			break;
   10448 	}
   10449 
   10450 	if ((mdic & MDIC_READY) == 0) {
   10451 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10452 		    device_xname(dev), phy, reg);
   10453 		return;
   10454 	} else if (mdic & MDIC_E) {
   10455 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10456 		    device_xname(dev), phy, reg);
   10457 		return;
   10458 	}
   10459 
   10460 	/*
   10461 	 * Allow some time after each MDIC transaction to avoid
   10462 	 * reading duplicate data in the next MDIC transaction.
   10463 	 */
   10464 	if (sc->sc_type == WM_T_PCH2)
   10465 		delay(100);
   10466 }
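
/*
 * Both MDIC accessors above use the same handshake: write a single
 * command word (opcode, PHY address, register address and, for a
 * write, the data) to WMREG_MDIC, then poll MDIC_READY every 50us for
 * up to WM_GEN_POLL_TIMEOUT * 3 iterations. MDIC_E reports an access
 * error, and on PCH2 an extra 100us of settle time is required so the
 * next transaction does not read back duplicate data.
 */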
   10467 
   10468 /*
   10469  * wm_gmii_i82544_readreg:	[mii interface function]
   10470  *
   10471  *	Read a PHY register on the GMII.
   10472  */
   10473 static int
   10474 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10475 {
   10476 	struct wm_softc *sc = device_private(dev);
   10477 	uint16_t val;
   10478 
   10479 	if (sc->phy.acquire(sc)) {
   10480 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10481 		return 0;
   10482 	}
   10483 
   10484 	wm_gmii_i82544_readreg_locked(dev, phy, reg, &val);
   10485 
   10486 	sc->phy.release(sc);
   10487 
   10488 	return val;
   10489 }
   10490 
   10491 static int
   10492 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10493 {
   10494 	struct wm_softc *sc = device_private(dev);
   10495 
   10496 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10497 		switch (sc->sc_phytype) {
   10498 		case WMPHY_IGP:
   10499 		case WMPHY_IGP_2:
   10500 		case WMPHY_IGP_3:
   10501 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10502 			    reg);
   10503 			break;
   10504 		default:
   10505 #ifdef WM_DEBUG
   10506 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10507 			    __func__, sc->sc_phytype, reg);
   10508 #endif
   10509 			break;
   10510 		}
   10511 	}
   10512 
   10513 	*val = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10514 
   10515 	return 0;
   10516 }
   10517 
   10518 /*
   10519  * wm_gmii_i82544_writereg:	[mii interface function]
   10520  *
   10521  *	Write a PHY register on the GMII.
   10522  */
   10523 static void
   10524 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10525 {
   10526 	struct wm_softc *sc = device_private(dev);
   10527 
   10528 	if (sc->phy.acquire(sc)) {
   10529 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10530 		return;
   10531 	}
   10532 
   10533 	wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10534 	sc->phy.release(sc);
   10535 }
   10536 
   10537 static int
   10538 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10539 {
   10540 	struct wm_softc *sc = device_private(dev);
   10541 
   10542 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10543 		switch (sc->sc_phytype) {
   10544 		case WMPHY_IGP:
   10545 		case WMPHY_IGP_2:
   10546 		case WMPHY_IGP_3:
   10547 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10548 			    reg);
   10549 			break;
   10550 		default:
   10551 #ifdef WM_DEBUG
   10552 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10553 			    __func__, sc->sc_phytype, reg);
   10554 #endif
   10555 			break;
   10556 		}
   10557 	}
   10558 
   10559 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10560 
   10561 	return 0;
   10562 }
   10563 
   10564 /*
   10565  * wm_gmii_i80003_readreg:	[mii interface function]
   10566  *
    10567  *	Read a PHY register on the Kumeran bus (80003).
    10568  * This could be handled by the PHY layer if we didn't have to lock the
    10569  * resource ...
   10570  */
   10571 static int
   10572 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10573 {
   10574 	struct wm_softc *sc = device_private(dev);
   10575 	int page_select, temp;
   10576 	int rv;
   10577 
   10578 	if (phy != 1) /* only one PHY on kumeran bus */
   10579 		return 0;
   10580 
   10581 	if (sc->phy.acquire(sc)) {
   10582 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10583 		return 0;
   10584 	}
   10585 
   10586 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10587 		page_select = GG82563_PHY_PAGE_SELECT;
   10588 	else {
   10589 		/*
   10590 		 * Use Alternative Page Select register to access registers
   10591 		 * 30 and 31.
   10592 		 */
   10593 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10594 	}
   10595 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10596 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10597 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    10598 		/*
    10599 		 * Wait an extra 200us to work around a bug with the ready
    10600 		 * bit in the MDIC register.
    10601 		 */
   10602 		delay(200);
   10603 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10604 			device_printf(dev, "%s failed\n", __func__);
   10605 			rv = 0; /* XXX */
   10606 			goto out;
   10607 		}
   10608 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10609 		delay(200);
   10610 	} else
   10611 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10612 
   10613 out:
   10614 	sc->phy.release(sc);
   10615 	return rv;
   10616 }
   10617 
   10618 /*
   10619  * wm_gmii_i80003_writereg:	[mii interface function]
   10620  *
    10621  *	Write a PHY register on the Kumeran bus (80003).
    10622  * This could be handled by the PHY layer if we didn't have to lock the
    10623  * resource ...
   10624  */
   10625 static void
   10626 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10627 {
   10628 	struct wm_softc *sc = device_private(dev);
   10629 	int page_select, temp;
   10630 
   10631 	if (phy != 1) /* only one PHY on kumeran bus */
   10632 		return;
   10633 
   10634 	if (sc->phy.acquire(sc)) {
   10635 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10636 		return;
   10637 	}
   10638 
   10639 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10640 		page_select = GG82563_PHY_PAGE_SELECT;
   10641 	else {
   10642 		/*
   10643 		 * Use Alternative Page Select register to access registers
   10644 		 * 30 and 31.
   10645 		 */
   10646 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10647 	}
   10648 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10649 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10650 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
    10651 		/*
    10652 		 * Wait an extra 200us to work around a bug with the ready
    10653 		 * bit in the MDIC register.
    10654 		 */
   10655 		delay(200);
   10656 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10657 			device_printf(dev, "%s failed\n", __func__);
   10658 			goto out;
   10659 		}
   10660 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10661 		delay(200);
   10662 	} else
   10663 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10664 
   10665 out:
   10666 	sc->phy.release(sc);
   10667 }
   10668 
   10669 /*
   10670  * wm_gmii_bm_readreg:	[mii interface function]
   10671  *
    10672  *	Read a PHY register on the BM PHY (82574/82583, ICH8/9/10).
    10673  * This could be handled by the PHY layer if we didn't have to lock the
    10674  * resource ...
   10675  */
   10676 static int
   10677 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10678 {
   10679 	struct wm_softc *sc = device_private(dev);
   10680 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10681 	uint16_t val;
   10682 	int rv;
   10683 
   10684 	if (sc->phy.acquire(sc)) {
   10685 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10686 		return 0;
   10687 	}
   10688 
   10689 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10690 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10691 		    || (reg == 31)) ? 1 : phy;
   10692 	/* Page 800 works differently than the rest so it has its own func */
   10693 	if (page == BM_WUC_PAGE) {
   10694 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, true, false);
   10695 		rv = val;
   10696 		goto release;
   10697 	}
   10698 
   10699 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10700 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10701 		    && (sc->sc_type != WM_T_82583))
   10702 			wm_gmii_mdic_writereg(dev, phy,
   10703 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10704 		else
   10705 			wm_gmii_mdic_writereg(dev, phy,
   10706 			    BME1000_PHY_PAGE_SELECT, page);
   10707 	}
   10708 
   10709 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10710 
   10711 release:
   10712 	sc->phy.release(sc);
   10713 	return rv;
   10714 }
   10715 
   10716 /*
   10717  * wm_gmii_bm_writereg:	[mii interface function]
   10718  *
    10719  *	Write a PHY register on the BM PHY (82574/82583, ICH8/9/10).
    10720  * This could be handled by the PHY layer if we didn't have to lock the
    10721  * resource ...
   10722  */
   10723 static void
   10724 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10725 {
   10726 	struct wm_softc *sc = device_private(dev);
   10727 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10728 
   10729 	if (sc->phy.acquire(sc)) {
   10730 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10731 		return;
   10732 	}
   10733 
   10734 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10735 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10736 		    || (reg == 31)) ? 1 : phy;
   10737 	/* Page 800 works differently than the rest so it has its own func */
   10738 	if (page == BM_WUC_PAGE) {
   10739 		uint16_t tmp;
   10740 
   10741 		tmp = val;
   10742 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, false, false);
   10743 		goto release;
   10744 	}
   10745 
   10746 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10747 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10748 		    && (sc->sc_type != WM_T_82583))
   10749 			wm_gmii_mdic_writereg(dev, phy,
   10750 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10751 		else
   10752 			wm_gmii_mdic_writereg(dev, phy,
   10753 			    BME1000_PHY_PAGE_SELECT, page);
   10754 	}
   10755 
   10756 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10757 
   10758 release:
   10759 	sc->phy.release(sc);
   10760 }
   10761 
   10762 /*
   10763  *  wm_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
   10764  *  @dev: pointer to the HW structure
   10765  *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
   10766  *
   10767  *  Assumes semaphore already acquired and phy_reg points to a valid memory
   10768  *  address to store contents of the BM_WUC_ENABLE_REG register.
   10769  */
   10770 static int
   10771 wm_enable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10772 {
   10773 	uint16_t temp;
   10774 
   10775 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10776 		device_xname(dev), __func__));
   10777 
   10778 	if (!phy_regp)
   10779 		return -1;
   10780 
   10781 	/* All page select, port ctrl and wakeup registers use phy address 1 */
   10782 
   10783 	/* Select Port Control Registers page */
   10784 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10785 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10786 
   10787 	/* Read WUCE and save it */
   10788 	*phy_regp = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10789 
   10790 	/* Enable both PHY wakeup mode and Wakeup register page writes.
   10791 	 * Prevent a power state change by disabling ME and Host PHY wakeup.
   10792 	 */
   10793 	temp = *phy_regp;
   10794 	temp |= BM_WUC_ENABLE_BIT;
   10795 	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10796 
   10797 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, temp);
   10798 
   10799 	/* Select Host Wakeup Registers page - caller now able to write
   10800 	 * registers on the Wakeup registers page
   10801 	 */
   10802 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10803 	    BM_WUC_PAGE << IGP3_PAGE_SHIFT);
   10804 
   10805 	return 0;
   10806 }
   10807 
   10808 /*
   10809  *  wm_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
   10810  *  @dev: pointer to the HW structure
   10811  *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
   10812  *
   10813  *  Restore BM_WUC_ENABLE_REG to its original value.
   10814  *
   10815  *  Assumes semaphore already acquired and *phy_reg is the contents of the
   10816  *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
   10817  *  caller.
   10818  */
   10819 static int
   10820 wm_disable_phy_wakeup_reg_access_bm(device_t dev, uint16_t *phy_regp)
   10821 {
   10822 
   10823 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   10824 		device_xname(dev), __func__));
   10825 
   10826 	if (!phy_regp)
   10827 		return -1;
   10828 
   10829 	/* Select Port Control Registers page */
   10830 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10831 	    BM_PORT_CTRL_PAGE << IGP3_PAGE_SHIFT);
   10832 
   10833 	/* Restore 769.17 to its original value */
   10834 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, *phy_regp);
   10835 
   10836 	return 0;
   10837 }
   10838 
   10839 /*
   10840  *  wm_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
   10841  *  @sc: pointer to the HW structure
    10842  *  @dev: pointer to the device (HW) structure
   10843  *  @val: pointer to the data to read or write
   10844  *  @rd: determines if operation is read or write
   10845  *  @page_set: BM_WUC_PAGE already set and access enabled
   10846  *
   10847  *  Read the PHY register at offset and store the retrieved information in
   10848  *  data, or write data to PHY register at offset.  Note the procedure to
   10849  *  access the PHY wakeup registers is different than reading the other PHY
    10850  *  access the PHY wakeup registers is different from that for the other
    10851  *  PHY registers. It works as follows:
   10852  *  2) Set page to 800 for host (801 if we were manageability)
   10853  *  3) Write the address using the address opcode (0x11)
   10854  *  4) Read or write the data using the data opcode (0x12)
   10855  *  5) Restore 769.17.2 to its original value
   10856  *
   10857  *  Steps 1 and 2 are done by wm_enable_phy_wakeup_reg_access_bm() and
   10858  *  step 5 is done by wm_disable_phy_wakeup_reg_access_bm().
   10859  *
   10860  *  Assumes semaphore is already acquired.  When page_set==TRUE, assumes
   10861  *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
   10862  *  is responsible for calls to wm_[enable|disable]_phy_wakeup_reg_bm()).
   10863  */
   10864 static int
   10865 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd,
   10866 	bool page_set)
   10867 {
   10868 	struct wm_softc *sc = device_private(dev);
   10869 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10870 	uint16_t page = BM_PHY_REG_PAGE(offset);
   10871 	uint16_t wuce;
   10872 	int rv = 0;
   10873 
   10874 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10875 		device_xname(dev), __func__));
   10876 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10877 	if ((sc->sc_type == WM_T_PCH)
   10878 	    && ((CSR_READ(sc, WMREG_PHY_CTRL) & PHY_CTRL_GBE_DIS) == 0)) {
   10879 		device_printf(dev,
   10880 		    "Attempting to access page %d while gig enabled.\n", page);
   10881 	}
   10882 
   10883 	if (!page_set) {
   10884 		/* Enable access to PHY wakeup registers */
   10885 		rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   10886 		if (rv != 0) {
   10887 			device_printf(dev,
   10888 			    "%s: Could not enable PHY wakeup reg access\n",
   10889 			    __func__);
   10890 			return rv;
   10891 		}
   10892 	}
   10893 	DPRINTF(WM_DEBUG_GMII, ("%s: %s: Accessing PHY page %d reg 0x%x\n",
   10894 		device_xname(sc->sc_dev), __func__, page, regnum));
   10895 
    10896 	/*
    10897 	 * Access the PHY wakeup register: steps 3 and 4 in the comment
    10898 	 * block above.
    10899 	 */
   10900 
   10901 	/* Write the Wakeup register page offset value using opcode 0x11 */
   10902 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10903 
   10904 	if (rd) {
   10905 		/* Read the Wakeup register page value using opcode 0x12 */
   10906 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10907 	} else {
   10908 		/* Write the Wakeup register page value using opcode 0x12 */
   10909 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10910 	}
   10911 
   10912 	if (!page_set)
   10913 		rv = wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   10914 
   10915 	return rv;
   10916 }
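
/*
 * Illustrative use (a sketch, not new driver code): with the PHY
 * semaphore held and page_set == false, a single call brackets the
 * whole five-step sequence, as wm_gmii_bm_readreg() does:
 *
 *	uint16_t val;
 *
 *	if (wm_access_phy_wakeup_reg_bm(dev, reg, &val, true, false) == 0)
 *		(val now holds the wakeup register's contents)
 *
 * With page_set == true, the caller must already have called
 * wm_enable_phy_wakeup_reg_access_bm(), and only steps 3 and 4 run.
 */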
   10917 
   10918 /*
   10919  * wm_gmii_hv_readreg:	[mii interface function]
   10920  *
    10921  *	Read a PHY register on the HV (PCH and newer) family of PHYs.
    10922  * This could be handled by the PHY layer if we didn't have to lock the
    10923  * resource ...
   10924  */
   10925 static int
   10926 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10927 {
   10928 	struct wm_softc *sc = device_private(dev);
   10929 	uint16_t val;
   10930 
   10931 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10932 		device_xname(dev), __func__));
   10933 	if (sc->phy.acquire(sc)) {
   10934 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10935 		return 0;
   10936 	}
   10937 
   10938 	wm_gmii_hv_readreg_locked(dev, phy, reg, &val);
   10939 	sc->phy.release(sc);
   10940 	return val;
   10941 }
   10942 
   10943 static int
   10944 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10945 {
   10946 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10947 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10948 
   10949 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10950 
   10951 	/* Page 800 works differently than the rest so it has its own func */
   10952 	if (page == BM_WUC_PAGE)
   10953 		return wm_access_phy_wakeup_reg_bm(dev, reg, val, true, false);
   10954 
    10955 	/*
    10956 	 * Pages lower than 768 work differently than the rest, so they
    10957 	 * need their own function; that case is not handled here.
    10958 	 */
   10959 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10960 		printf("gmii_hv_readreg!!!\n");
   10961 		return 0;
   10962 	}
   10963 
   10964 	/*
   10965 	 * XXX I21[789] documents say that the SMBus Address register is at
   10966 	 * PHY address 01, Page 0 (not 768), Register 26.
   10967 	 */
   10968 	if (page == HV_INTC_FC_PAGE_START)
   10969 		page = 0;
   10970 
   10971 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10972 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10973 		    page << BME1000_PAGE_SHIFT);
   10974 	}
   10975 
   10976 	*val = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10977 	return 0;
   10978 }
   10979 
   10980 /*
   10981  * wm_gmii_hv_writereg:	[mii interface function]
   10982  *
    10983  *	Write a PHY register on the HV (PCH and newer) family of PHYs.
    10984  * This could be handled by the PHY layer if we didn't have to lock the
    10985  * resource ...
   10986  */
   10987 static void
   10988 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10989 {
   10990 	struct wm_softc *sc = device_private(dev);
   10991 
   10992 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10993 		device_xname(dev), __func__));
   10994 
   10995 	if (sc->phy.acquire(sc)) {
   10996 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10997 		return;
   10998 	}
   10999 
   11000 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   11001 	sc->phy.release(sc);
   11002 }
   11003 
   11004 static int
   11005 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11006 {
   11007 	struct wm_softc *sc = device_private(dev);
   11008 	uint16_t page = BM_PHY_REG_PAGE(reg);
   11009 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   11010 	int rv;
   11011 
   11012 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   11013 
   11014 	/* Page 800 works differently than the rest so it has its own func */
   11015 	if (page == BM_WUC_PAGE) {
   11016 		uint16_t tmp;
   11017 
   11018 		tmp = val;
   11019 		rv = wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, false, false);
   11020 		return rv;
   11021 	}
   11022 
    11023 	/*
    11024 	 * Pages lower than 768 work differently than the rest, so they
    11025 	 * need their own function; that case is not handled here.
    11026 	 */
   11027 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   11028 		printf("gmii_hv_writereg!!!\n");
   11029 		return -1;
   11030 	}
   11031 
   11032 	{
   11033 		/*
   11034 		 * XXX I21[789] documents say that the SMBus Address register
   11035 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   11036 		 */
   11037 		if (page == HV_INTC_FC_PAGE_START)
   11038 			page = 0;
   11039 
   11040 		/*
   11041 		 * XXX Workaround MDIO accesses being disabled after entering
   11042 		 * IEEE Power Down (whenever bit 11 of the PHY control
   11043 		 * register is set)
   11044 		 */
   11045 		if (sc->sc_phytype == WMPHY_82578) {
   11046 			struct mii_softc *child;
   11047 
   11048 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11049 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   11050 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   11051 			    && ((val & (1 << 11)) != 0)) {
   11052 				printf("XXX need workaround\n");
   11053 			}
   11054 		}
   11055 
   11056 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   11057 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   11058 			    page << BME1000_PAGE_SHIFT);
   11059 		}
   11060 	}
   11061 
   11062 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   11063 
   11064 	return 0;
   11065 }
   11066 
   11067 /*
   11068  * wm_gmii_82580_readreg:	[mii interface function]
   11069  *
   11070  *	Read a PHY register on the 82580 and I350.
   11071  * This could be handled by the PHY layer if we didn't have to lock the
    11072  * resource ...
   11073  */
   11074 static int
   11075 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   11076 {
   11077 	struct wm_softc *sc = device_private(dev);
   11078 	int rv;
   11079 
   11080 	if (sc->phy.acquire(sc) != 0) {
   11081 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11082 		return 0;
   11083 	}
   11084 
   11085 #ifdef DIAGNOSTIC
   11086 	if (reg > MII_ADDRMASK) {
   11087 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11088 		    __func__, sc->sc_phytype, reg);
   11089 		reg &= MII_ADDRMASK;
   11090 	}
   11091 #endif
   11092 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   11093 
   11094 	sc->phy.release(sc);
   11095 	return rv;
   11096 }
   11097 
   11098 /*
   11099  * wm_gmii_82580_writereg:	[mii interface function]
   11100  *
   11101  *	Write a PHY register on the 82580 and I350.
   11102  * This could be handled by the PHY layer if we didn't have to lock the
   11103  * ressource ...
    11104  * resource ...
   11105 static void
   11106 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   11107 {
   11108 	struct wm_softc *sc = device_private(dev);
   11109 
   11110 	if (sc->phy.acquire(sc) != 0) {
   11111 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11112 		return;
   11113 	}
   11114 
   11115 #ifdef DIAGNOSTIC
   11116 	if (reg > MII_ADDRMASK) {
   11117 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   11118 		    __func__, sc->sc_phytype, reg);
   11119 		reg &= MII_ADDRMASK;
   11120 	}
   11121 #endif
   11122 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   11123 
   11124 	sc->phy.release(sc);
   11125 }
   11126 
   11127 /*
   11128  * wm_gmii_gs40g_readreg:	[mii interface function]
   11129  *
    11130  *	Read a PHY register on the I210 and I211.
    11131  * This could be handled by the PHY layer if we didn't have to lock the
    11132  * resource ...
   11133  */
   11134 static int
   11135 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   11136 {
   11137 	struct wm_softc *sc = device_private(dev);
   11138 	int page, offset;
   11139 	int rv;
   11140 
   11141 	/* Acquire semaphore */
   11142 	if (sc->phy.acquire(sc)) {
   11143 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11144 		return 0;
   11145 	}
   11146 
   11147 	/* Page select */
   11148 	page = reg >> GS40G_PAGE_SHIFT;
   11149 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11150 
   11151 	/* Read reg */
   11152 	offset = reg & GS40G_OFFSET_MASK;
   11153 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   11154 
   11155 	sc->phy.release(sc);
   11156 	return rv;
   11157 }
   11158 
   11159 /*
   11160  * wm_gmii_gs40g_writereg:	[mii interface function]
   11161  *
   11162  *	Write a PHY register on the I210 and I211.
   11163  * This could be handled by the PHY layer if we didn't have to lock the
    11164  * resource ...
   11165  */
   11166 static void
   11167 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   11168 {
   11169 	struct wm_softc *sc = device_private(dev);
   11170 	int page, offset;
   11171 
   11172 	/* Acquire semaphore */
   11173 	if (sc->phy.acquire(sc)) {
   11174 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11175 		return;
   11176 	}
   11177 
   11178 	/* Page select */
   11179 	page = reg >> GS40G_PAGE_SHIFT;
   11180 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   11181 
   11182 	/* Write reg */
   11183 	offset = reg & GS40G_OFFSET_MASK;
   11184 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   11185 
   11186 	/* Release semaphore */
   11187 	sc->phy.release(sc);
   11188 }
   11189 
   11190 /*
   11191  * wm_gmii_statchg:	[mii interface function]
   11192  *
   11193  *	Callback from MII layer when media changes.
   11194  */
   11195 static void
   11196 wm_gmii_statchg(struct ifnet *ifp)
   11197 {
   11198 	struct wm_softc *sc = ifp->if_softc;
   11199 	struct mii_data *mii = &sc->sc_mii;
   11200 
   11201 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   11202 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11203 	sc->sc_fcrtl &= ~FCRTL_XONE;
   11204 
   11205 	/*
   11206 	 * Get flow control negotiation result.
   11207 	 */
   11208 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   11209 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   11210 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   11211 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   11212 	}
   11213 
   11214 	if (sc->sc_flowflags & IFM_FLOW) {
   11215 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   11216 			sc->sc_ctrl |= CTRL_TFCE;
   11217 			sc->sc_fcrtl |= FCRTL_XONE;
   11218 		}
   11219 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   11220 			sc->sc_ctrl |= CTRL_RFCE;
   11221 	}
   11222 
   11223 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   11224 		DPRINTF(WM_DEBUG_LINK,
   11225 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   11226 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11227 	} else {
   11228 		DPRINTF(WM_DEBUG_LINK,
   11229 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   11230 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11231 	}
   11232 
   11233 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11234 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11235 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   11236 						 : WMREG_FCRTL, sc->sc_fcrtl);
   11237 	if (sc->sc_type == WM_T_80003) {
   11238 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   11239 		case IFM_1000_T:
   11240 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11241 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   11242 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   11243 			break;
   11244 		default:
   11245 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   11246 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   11247 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   11248 			break;
   11249 		}
   11250 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   11251 	}
   11252 }
   11253 
   11254 /* kumeran related (80003, ICH* and PCH*) */
   11255 
   11256 /*
   11257  * wm_kmrn_readreg:
   11258  *
   11259  *	Read a kumeran register
   11260  */
   11261 static int
   11262 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11263 {
   11264 	int rv;
   11265 
   11266 	if (sc->sc_type == WM_T_80003)
   11267 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11268 	else
   11269 		rv = sc->phy.acquire(sc);
   11270 	if (rv != 0) {
   11271 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11272 		    __func__);
   11273 		return rv;
   11274 	}
   11275 
   11276 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11277 
   11278 	if (sc->sc_type == WM_T_80003)
   11279 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11280 	else
   11281 		sc->phy.release(sc);
   11282 
   11283 	return rv;
   11284 }
   11285 
   11286 static int
   11287 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11288 {
   11289 
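	/*
	 * A Kumeran read cycle is started by writing the register offset
	 * with the REN (read enable) bit set; after a short delay the
	 * data can be read back from the low 16 bits of KUMCTRLSTA.
	 */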
   11290 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11291 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11292 	    KUMCTRLSTA_REN);
   11293 	CSR_WRITE_FLUSH(sc);
   11294 	delay(2);
   11295 
   11296 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11297 
   11298 	return 0;
   11299 }
   11300 
   11301 /*
   11302  * wm_kmrn_writereg:
   11303  *
   11304  *	Write a kumeran register
   11305  */
   11306 static int
   11307 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11308 {
   11309 	int rv;
   11310 
   11311 	if (sc->sc_type == WM_T_80003)
   11312 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11313 	else
   11314 		rv = sc->phy.acquire(sc);
   11315 	if (rv != 0) {
   11316 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11317 		    __func__);
   11318 		return rv;
   11319 	}
   11320 
   11321 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11322 
   11323 	if (sc->sc_type == WM_T_80003)
   11324 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11325 	else
   11326 		sc->phy.release(sc);
   11327 
   11328 	return rv;
   11329 }
   11330 
   11331 static int
   11332 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11333 {
   11334 
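	/*
	 * A Kumeran write is a single cycle: the register offset and the
	 * data are combined in one KUMCTRLSTA write, with REN clear.
	 */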
   11335 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11336 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11337 
   11338 	return 0;
   11339 }
   11340 
   11341 /*
   11342  * EMI register related (82579, WMPHY_I217(PCH2 and newer))
   11343  * This access method is different from IEEE MMD.
   11344  */
   11345 static int
   11346 wm_access_emi_reg_locked(device_t dev, int reg, uint16_t *val, bool rd)
   11347 {
   11348 	struct wm_softc *sc = device_private(dev);
   11349 	int rv;
   11350 
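	/*
	 * EMI registers are reached indirectly: write the EMI register
	 * address to I82579_EMI_ADDR (at PHY address 2), then transfer
	 * the data through I82579_EMI_DATA.
	 */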
   11351 	rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_ADDR, reg);
   11352 	if (rv != 0)
   11353 		return rv;
   11354 
   11355 	if (rd)
   11356 		rv = sc->phy.readreg_locked(dev, 2, I82579_EMI_DATA, val);
   11357 	else
   11358 		rv = sc->phy.writereg_locked(dev, 2, I82579_EMI_DATA, *val);
   11359 	return rv;
   11360 }
   11361 
   11362 static int
   11363 wm_read_emi_reg_locked(device_t dev, int reg, uint16_t *val)
   11364 {
   11365 
   11366 	return wm_access_emi_reg_locked(dev, reg, val, true);
   11367 }
   11368 
   11369 static int
   11370 wm_write_emi_reg_locked(device_t dev, int reg, uint16_t val)
   11371 {
   11372 
   11373 	return wm_access_emi_reg_locked(dev, reg, &val, false);
   11374 }
   11375 
   11376 /* SGMII related */
   11377 
   11378 /*
   11379  * wm_sgmii_uses_mdio
   11380  *
   11381  * Check whether the transaction is to the internal PHY or the external
   11382  * MDIO interface. Return true if it's MDIO.
   11383  */
   11384 static bool
   11385 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11386 {
   11387 	uint32_t reg;
   11388 	bool ismdio = false;
   11389 
   11390 	switch (sc->sc_type) {
   11391 	case WM_T_82575:
   11392 	case WM_T_82576:
   11393 		reg = CSR_READ(sc, WMREG_MDIC);
   11394 		ismdio = ((reg & MDIC_DEST) != 0);
   11395 		break;
   11396 	case WM_T_82580:
   11397 	case WM_T_I350:
   11398 	case WM_T_I354:
   11399 	case WM_T_I210:
   11400 	case WM_T_I211:
   11401 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11402 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11403 		break;
   11404 	default:
   11405 		break;
   11406 	}
   11407 
   11408 	return ismdio;
   11409 }
   11410 
   11411 /*
   11412  * wm_sgmii_readreg:	[mii interface function]
   11413  *
   11414  *	Read a PHY register on the SGMII
   11415  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11417  */
   11418 static int
   11419 wm_sgmii_readreg(device_t dev, int phy, int reg)
   11420 {
   11421 	struct wm_softc *sc = device_private(dev);
   11422 	uint16_t val;
   11423 
   11424 	if (sc->phy.acquire(sc)) {
   11425 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11426 		return 0;
   11427 	}
   11428 
   11429 	wm_sgmii_readreg_locked(dev, phy, reg, &val);
   11430 
   11431 	sc->phy.release(sc);
   11432 	return val;
   11433 }
   11434 
   11435 static int
   11436 wm_sgmii_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   11437 {
   11438 	struct wm_softc *sc = device_private(dev);
   11439 	uint32_t i2ccmd;
	int i, rv = 0;
   11441 
   11442 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11443 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11444 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11445 
   11446 	/* Poll the ready bit */
   11447 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11448 		delay(50);
   11449 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11450 		if (i2ccmd & I2CCMD_READY)
   11451 			break;
   11452 	}
   11453 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11454 		device_printf(dev, "I2CCMD Read did not complete\n");
   11455 		rv = ETIMEDOUT;
   11456 	}
   11457 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11458 		device_printf(dev, "I2CCMD Error bit set\n");
   11459 		rv = EIO;
   11460 	}
   11461 
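	/* Data from the I2C interface is byte-swapped; restore the order. */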
   11462 	*val = (uint16_t)((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11463 
   11464 	return rv;
   11465 }
   11466 
   11467 /*
   11468  * wm_sgmii_writereg:	[mii interface function]
   11469  *
   11470  *	Write a PHY register on the SGMII.
   11471  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   11473  */
   11474 static void
   11475 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   11476 {
   11477 	struct wm_softc *sc = device_private(dev);
   11478 
   11479 	if (sc->phy.acquire(sc) != 0) {
   11480 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11481 		return;
   11482 	}
   11483 
   11484 	wm_sgmii_writereg_locked(dev, phy, reg, val);
   11485 
   11486 	sc->phy.release(sc);
   11487 }
   11488 
   11489 static int
   11490 wm_sgmii_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   11491 {
   11492 	struct wm_softc *sc = device_private(dev);
   11493 	uint32_t i2ccmd;
   11494 	uint16_t swapdata;
   11495 	int rv = 0;
   11496 	int i;
   11497 
   11498 	/* Swap the data bytes for the I2C interface */
   11499 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11500 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11501 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11502 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11503 
   11504 	/* Poll the ready bit */
   11505 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11506 		delay(50);
   11507 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11508 		if (i2ccmd & I2CCMD_READY)
   11509 			break;
   11510 	}
   11511 	if ((i2ccmd & I2CCMD_READY) == 0) {
   11512 		device_printf(dev, "I2CCMD Write did not complete\n");
   11513 		rv = ETIMEDOUT;
   11514 	}
   11515 	if ((i2ccmd & I2CCMD_ERROR) != 0) {
   11516 		device_printf(dev, "I2CCMD Error bit set\n");
   11517 		rv = EIO;
   11518 	}
   11519 
   11520 	return rv;
   11521 }
   11522 
   11523 /* TBI related */
   11524 
   11525 static bool
   11526 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11527 {
   11528 	bool sig;
   11529 
   11530 	sig = ctrl & CTRL_SWDPIN(1);
   11531 
   11532 	/*
   11533 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11534 	 * detect a signal, 1 if they don't.
   11535 	 */
   11536 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11537 		sig = !sig;
   11538 
   11539 	return sig;
   11540 }
   11541 
   11542 /*
   11543  * wm_tbi_mediainit:
   11544  *
   11545  *	Initialize media for use on 1000BASE-X devices.
   11546  */
   11547 static void
   11548 wm_tbi_mediainit(struct wm_softc *sc)
   11549 {
   11550 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11551 	const char *sep = "";
   11552 
   11553 	if (sc->sc_type < WM_T_82543)
   11554 		sc->sc_tipg = TIPG_WM_DFLT;
   11555 	else
   11556 		sc->sc_tipg = TIPG_LG_DFLT;
   11557 
   11558 	sc->sc_tbi_serdes_anegticks = 5;
   11559 
   11560 	/* Initialize our media structures */
   11561 	sc->sc_mii.mii_ifp = ifp;
   11562 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11563 
   11564 	if ((sc->sc_type >= WM_T_82575)
   11565 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11566 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11567 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11568 	else
   11569 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11570 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11571 
   11572 	/*
   11573 	 * SWD Pins:
   11574 	 *
   11575 	 *	0 = Link LED (output)
   11576 	 *	1 = Loss Of Signal (input)
   11577 	 */
   11578 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11579 
   11580 	/* XXX Perhaps this is only for TBI */
   11581 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11582 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11583 
   11584 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11585 		sc->sc_ctrl &= ~CTRL_LRST;
   11586 
   11587 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11588 
   11589 #define	ADD(ss, mm, dd)							\
   11590 do {									\
   11591 	aprint_normal("%s%s", sep, ss);					\
   11592 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11593 	sep = ", ";							\
   11594 } while (/*CONSTCOND*/0)
   11595 
   11596 	aprint_normal_dev(sc->sc_dev, "");
   11597 
   11598 	if (sc->sc_type == WM_T_I354) {
   11599 		uint32_t status;
   11600 
   11601 		status = CSR_READ(sc, WMREG_STATUS);
   11602 		if (((status & STATUS_2P5_SKU) != 0)
   11603 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11604 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   11605 		} else
   11606 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   11607 	} else if (sc->sc_type == WM_T_82545) {
   11608 		/* Only 82545 is LX (XXX except SFP) */
   11609 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11610 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11611 	} else {
   11612 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11613 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11614 	}
   11615 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11616 	aprint_normal("\n");
   11617 
   11618 #undef ADD
   11619 
   11620 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11621 }
   11622 
   11623 /*
   11624  * wm_tbi_mediachange:	[ifmedia interface function]
   11625  *
   11626  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11627  */
   11628 static int
   11629 wm_tbi_mediachange(struct ifnet *ifp)
   11630 {
   11631 	struct wm_softc *sc = ifp->if_softc;
   11632 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11633 	uint32_t status, ctrl;
   11634 	bool signal;
   11635 	int i;
   11636 
   11637 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11638 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11639 		/* XXX need some work for >= 82571 and < 82575 */
   11640 		if (sc->sc_type < WM_T_82575)
   11641 			return 0;
   11642 	}
   11643 
   11644 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11645 	    || (sc->sc_type >= WM_T_82575))
   11646 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11647 
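	/*
	 * Build the TXCW (Transmit Configuration Word) advertisement:
	 * enable autonegotiation and advertise the selected duplex.
	 */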
   11648 	sc->sc_ctrl &= ~CTRL_LRST;
   11649 	sc->sc_txcw = TXCW_ANE;
   11650 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11651 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11652 	else if (ife->ifm_media & IFM_FDX)
   11653 		sc->sc_txcw |= TXCW_FD;
   11654 	else
   11655 		sc->sc_txcw |= TXCW_HD;
   11656 
   11657 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11658 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11659 
   11660 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   11661 		device_xname(sc->sc_dev), sc->sc_txcw));
   11662 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11663 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11664 	CSR_WRITE_FLUSH(sc);
   11665 	delay(1000);
   11666 
   11667 	ctrl =  CSR_READ(sc, WMREG_CTRL);
   11668 	signal = wm_tbi_havesignal(sc, ctrl);
   11669 
   11670 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11671 		signal));
   11672 
   11673 	if (signal) {
   11674 		/* Have signal; wait for the link to come up. */
   11675 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11676 			delay(10000);
   11677 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11678 				break;
   11679 		}
   11680 
   11681 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
			device_xname(sc->sc_dev), i));
   11683 
   11684 		status = CSR_READ(sc, WMREG_STATUS);
   11685 		DPRINTF(WM_DEBUG_LINK,
   11686 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev), status, STATUS_LU));
   11688 		if (status & STATUS_LU) {
   11689 			/* Link is up. */
   11690 			DPRINTF(WM_DEBUG_LINK,
   11691 			    ("%s: LINK: set media -> link up %s\n",
   11692 				device_xname(sc->sc_dev),
   11693 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11694 
			/*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so refresh our cached sc->sc_ctrl.
			 */
   11699 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11700 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11701 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11702 			if (status & STATUS_FD)
   11703 				sc->sc_tctl |=
   11704 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11705 			else
   11706 				sc->sc_tctl |=
   11707 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11708 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11709 				sc->sc_fcrtl |= FCRTL_XONE;
   11710 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11711 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11712 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11713 			sc->sc_tbi_linkup = 1;
   11714 		} else {
   11715 			if (i == WM_LINKUP_TIMEOUT)
   11716 				wm_check_for_link(sc);
   11717 			/* Link is down. */
   11718 			DPRINTF(WM_DEBUG_LINK,
   11719 			    ("%s: LINK: set media -> link down\n",
   11720 				device_xname(sc->sc_dev)));
   11721 			sc->sc_tbi_linkup = 0;
   11722 		}
   11723 	} else {
   11724 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11725 			device_xname(sc->sc_dev)));
   11726 		sc->sc_tbi_linkup = 0;
   11727 	}
   11728 
   11729 	wm_tbi_serdes_set_linkled(sc);
   11730 
   11731 	return 0;
   11732 }
   11733 
   11734 /*
   11735  * wm_tbi_mediastatus:	[ifmedia interface function]
   11736  *
   11737  *	Get the current interface media status on a 1000BASE-X device.
   11738  */
   11739 static void
   11740 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11741 {
   11742 	struct wm_softc *sc = ifp->if_softc;
   11743 	uint32_t ctrl, status;
   11744 
   11745 	ifmr->ifm_status = IFM_AVALID;
   11746 	ifmr->ifm_active = IFM_ETHER;
   11747 
   11748 	status = CSR_READ(sc, WMREG_STATUS);
   11749 	if ((status & STATUS_LU) == 0) {
   11750 		ifmr->ifm_active |= IFM_NONE;
   11751 		return;
   11752 	}
   11753 
   11754 	ifmr->ifm_status |= IFM_ACTIVE;
   11755 	/* Only 82545 is LX */
   11756 	if (sc->sc_type == WM_T_82545)
   11757 		ifmr->ifm_active |= IFM_1000_LX;
   11758 	else
   11759 		ifmr->ifm_active |= IFM_1000_SX;
   11760 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11761 		ifmr->ifm_active |= IFM_FDX;
   11762 	else
   11763 		ifmr->ifm_active |= IFM_HDX;
   11764 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11765 	if (ctrl & CTRL_RFCE)
   11766 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11767 	if (ctrl & CTRL_TFCE)
   11768 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11769 }
   11770 
   11771 /* XXX TBI only */
   11772 static int
   11773 wm_check_for_link(struct wm_softc *sc)
   11774 {
   11775 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11776 	uint32_t rxcw;
   11777 	uint32_t ctrl;
   11778 	uint32_t status;
   11779 	bool signal;
   11780 
   11781 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11782 		device_xname(sc->sc_dev), __func__));
   11783 
   11784 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11785 		/* XXX need some work for >= 82571 */
   11786 		if (sc->sc_type >= WM_T_82571) {
   11787 			sc->sc_tbi_linkup = 1;
   11788 			return 0;
   11789 		}
   11790 	}
   11791 
   11792 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11793 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11794 	status = CSR_READ(sc, WMREG_STATUS);
   11795 	signal = wm_tbi_havesignal(sc, ctrl);
   11796 
   11797 	DPRINTF(WM_DEBUG_LINK,
   11798 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11799 		device_xname(sc->sc_dev), __func__, signal,
   11800 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11801 
   11802 	/*
   11803 	 * SWDPIN   LU RXCW
   11804 	 *	0    0	  0
   11805 	 *	0    0	  1	(should not happen)
   11806 	 *	0    1	  0	(should not happen)
   11807 	 *	0    1	  1	(should not happen)
   11808 	 *	1    0	  0	Disable autonego and force linkup
   11809 	 *	1    0	  1	got /C/ but not linkup yet
   11810 	 *	1    1	  0	(linkup)
   11811 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11812 	 *
   11813 	 */
   11814 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11815 		DPRINTF(WM_DEBUG_LINK,
   11816 		    ("%s: %s: force linkup and fullduplex\n",
   11817 			device_xname(sc->sc_dev), __func__));
   11818 		sc->sc_tbi_linkup = 0;
   11819 		/* Disable auto-negotiation in the TXCW register */
   11820 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11821 
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: TFCE and RFCE in CTRL were updated by the hardware
		 * automatically, so update sc->sc_ctrl too.
		 */
   11828 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11829 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11830 	} else if (((status & STATUS_LU) != 0)
   11831 	    && ((rxcw & RXCW_C) != 0)
   11832 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11833 		sc->sc_tbi_linkup = 1;
   11834 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11835 			device_xname(sc->sc_dev),
   11836 			__func__));
   11837 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11838 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11839 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
   11840 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/",
   11841 			device_xname(sc->sc_dev), __func__));
   11842 	} else {
   11843 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11844 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11845 			status));
   11846 	}
   11847 
   11848 	return 0;
   11849 }
   11850 
   11851 /*
   11852  * wm_tbi_tick:
   11853  *
   11854  *	Check the link on TBI devices.
   11855  *	This function acts as mii_tick().
   11856  */
   11857 static void
   11858 wm_tbi_tick(struct wm_softc *sc)
   11859 {
   11860 	struct mii_data *mii = &sc->sc_mii;
   11861 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11862 	uint32_t status;
   11863 
   11864 	KASSERT(WM_CORE_LOCKED(sc));
   11865 
   11866 	status = CSR_READ(sc, WMREG_STATUS);
   11867 
   11868 	/* XXX is this needed? */
   11869 	(void)CSR_READ(sc, WMREG_RXCW);
   11870 	(void)CSR_READ(sc, WMREG_CTRL);
   11871 
   11872 	/* set link status */
   11873 	if ((status & STATUS_LU) == 0) {
   11874 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11875 			device_xname(sc->sc_dev)));
   11876 		sc->sc_tbi_linkup = 0;
   11877 	} else if (sc->sc_tbi_linkup == 0) {
   11878 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11879 			device_xname(sc->sc_dev),
   11880 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11881 		sc->sc_tbi_linkup = 1;
   11882 		sc->sc_tbi_serdes_ticks = 0;
   11883 	}
   11884 
   11885 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11886 		goto setled;
   11887 
   11888 	if ((status & STATUS_LU) == 0) {
   11889 		sc->sc_tbi_linkup = 0;
   11890 		/* If the timer expired, retry autonegotiation */
   11891 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11892 		    && (++sc->sc_tbi_serdes_ticks
   11893 			>= sc->sc_tbi_serdes_anegticks)) {
   11894 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11895 			sc->sc_tbi_serdes_ticks = 0;
   11896 			/*
   11897 			 * Reset the link, and let autonegotiation do
   11898 			 * its thing
   11899 			 */
   11900 			sc->sc_ctrl |= CTRL_LRST;
   11901 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11902 			CSR_WRITE_FLUSH(sc);
   11903 			delay(1000);
   11904 			sc->sc_ctrl &= ~CTRL_LRST;
   11905 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11906 			CSR_WRITE_FLUSH(sc);
   11907 			delay(1000);
   11908 			CSR_WRITE(sc, WMREG_TXCW,
   11909 			    sc->sc_txcw & ~TXCW_ANE);
   11910 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11911 		}
   11912 	}
   11913 
   11914 setled:
   11915 	wm_tbi_serdes_set_linkled(sc);
   11916 }
   11917 
   11918 /* SERDES related */
   11919 static void
   11920 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11921 {
   11922 	uint32_t reg;
   11923 
   11924 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11925 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11926 		return;
   11927 
   11928 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11929 	reg |= PCS_CFG_PCS_EN;
   11930 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11931 
   11932 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11933 	reg &= ~CTRL_EXT_SWDPIN(3);
   11934 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11935 	CSR_WRITE_FLUSH(sc);
   11936 }
   11937 
   11938 static int
   11939 wm_serdes_mediachange(struct ifnet *ifp)
   11940 {
   11941 	struct wm_softc *sc = ifp->if_softc;
   11942 	bool pcs_autoneg = true; /* XXX */
   11943 	uint32_t ctrl_ext, pcs_lctl, reg;
   11944 
   11945 	/* XXX Currently, this function is not called on 8257[12] */
   11946 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11947 	    || (sc->sc_type >= WM_T_82575))
   11948 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11949 
   11950 	wm_serdes_power_up_link_82575(sc);
   11951 
   11952 	sc->sc_ctrl |= CTRL_SLU;
   11953 
   11954 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11955 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11956 
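	/*
	 * Program the PCS according to the link mode strapped in
	 * CTRL_EXT: SGMII keeps autonegotiation enabled, while
	 * 1000BASE-KX forces speed and duplex.
	 */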
   11957 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11958 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11959 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11960 	case CTRL_EXT_LINK_MODE_SGMII:
   11961 		pcs_autoneg = true;
   11962 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11963 		break;
   11964 	case CTRL_EXT_LINK_MODE_1000KX:
   11965 		pcs_autoneg = false;
   11966 		/* FALLTHROUGH */
   11967 	default:
   11968 		if ((sc->sc_type == WM_T_82575)
   11969 		    || (sc->sc_type == WM_T_82576)) {
   11970 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11971 				pcs_autoneg = false;
   11972 		}
   11973 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11974 		    | CTRL_FRCFDX;
   11975 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11976 	}
   11977 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11978 
   11979 	if (pcs_autoneg) {
   11980 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11981 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11982 
   11983 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11984 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11985 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11986 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11987 	} else
   11988 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11989 
   11990 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11991 
   11993 	return 0;
   11994 }
   11995 
   11996 static void
   11997 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11998 {
   11999 	struct wm_softc *sc = ifp->if_softc;
   12000 	struct mii_data *mii = &sc->sc_mii;
   12001 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   12002 	uint32_t pcs_adv, pcs_lpab, reg;
   12003 
   12004 	ifmr->ifm_status = IFM_AVALID;
   12005 	ifmr->ifm_active = IFM_ETHER;
   12006 
   12007 	/* Check PCS */
   12008 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12009 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   12010 		ifmr->ifm_active |= IFM_NONE;
   12011 		sc->sc_tbi_linkup = 0;
   12012 		goto setled;
   12013 	}
   12014 
   12015 	sc->sc_tbi_linkup = 1;
   12016 	ifmr->ifm_status |= IFM_ACTIVE;
   12017 	if (sc->sc_type == WM_T_I354) {
   12018 		uint32_t status;
   12019 
   12020 		status = CSR_READ(sc, WMREG_STATUS);
   12021 		if (((status & STATUS_2P5_SKU) != 0)
   12022 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   12023 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   12024 		} else
   12025 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   12026 	} else {
   12027 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   12028 		case PCS_LSTS_SPEED_10:
   12029 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   12030 			break;
   12031 		case PCS_LSTS_SPEED_100:
   12032 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   12033 			break;
   12034 		case PCS_LSTS_SPEED_1000:
   12035 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12036 			break;
   12037 		default:
   12038 			device_printf(sc->sc_dev, "Unknown speed\n");
   12039 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   12040 			break;
   12041 		}
   12042 	}
   12043 	if ((reg & PCS_LSTS_FDX) != 0)
   12044 		ifmr->ifm_active |= IFM_FDX;
   12045 	else
   12046 		ifmr->ifm_active |= IFM_HDX;
   12047 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   12048 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   12049 		/* Check flow */
   12050 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12051 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   12052 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   12053 			goto setled;
   12054 		}
   12055 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   12056 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   12057 		DPRINTF(WM_DEBUG_LINK,
   12058 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
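		/*
		 * Resolve the pause ability per the usual 802.3
		 * autonegotiation rules: symmetric pause if both sides
		 * advertise SYM; asymmetric combinations give TX-only or
		 * RX-only pause.
		 */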
   12059 		if ((pcs_adv & TXCW_SYM_PAUSE)
   12060 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   12061 			mii->mii_media_active |= IFM_FLOW
   12062 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   12063 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   12064 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12065 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   12066 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12067 			mii->mii_media_active |= IFM_FLOW
   12068 			    | IFM_ETH_TXPAUSE;
   12069 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   12070 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   12071 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   12072 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   12073 			mii->mii_media_active |= IFM_FLOW
   12074 			    | IFM_ETH_RXPAUSE;
   12075 		}
   12076 	}
   12077 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   12078 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   12079 setled:
   12080 	wm_tbi_serdes_set_linkled(sc);
   12081 }
   12082 
   12083 /*
   12084  * wm_serdes_tick:
   12085  *
   12086  *	Check the link on serdes devices.
   12087  */
   12088 static void
   12089 wm_serdes_tick(struct wm_softc *sc)
   12090 {
   12091 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   12092 	struct mii_data *mii = &sc->sc_mii;
   12093 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   12094 	uint32_t reg;
   12095 
   12096 	KASSERT(WM_CORE_LOCKED(sc));
   12097 
   12098 	mii->mii_media_status = IFM_AVALID;
   12099 	mii->mii_media_active = IFM_ETHER;
   12100 
   12101 	/* Check PCS */
   12102 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   12103 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   12104 		mii->mii_media_status |= IFM_ACTIVE;
   12105 		sc->sc_tbi_linkup = 1;
   12106 		sc->sc_tbi_serdes_ticks = 0;
   12107 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   12108 		if ((reg & PCS_LSTS_FDX) != 0)
   12109 			mii->mii_media_active |= IFM_FDX;
   12110 		else
   12111 			mii->mii_media_active |= IFM_HDX;
   12112 	} else {
   12113 		mii->mii_media_status |= IFM_NONE;
   12114 		sc->sc_tbi_linkup = 0;
   12115 		/* If the timer expired, retry autonegotiation */
   12116 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   12117 		    && (++sc->sc_tbi_serdes_ticks
   12118 			>= sc->sc_tbi_serdes_anegticks)) {
   12119 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   12120 			sc->sc_tbi_serdes_ticks = 0;
   12121 			/* XXX */
   12122 			wm_serdes_mediachange(ifp);
   12123 		}
   12124 	}
   12125 
   12126 	wm_tbi_serdes_set_linkled(sc);
   12127 }
   12128 
   12129 /* SFP related */
   12130 
   12131 static int
   12132 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   12133 {
   12134 	uint32_t i2ccmd;
   12135 	int i;
   12136 
   12137 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   12138 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   12139 
   12140 	/* Poll the ready bit */
   12141 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   12142 		delay(50);
   12143 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   12144 		if (i2ccmd & I2CCMD_READY)
   12145 			break;
   12146 	}
   12147 	if ((i2ccmd & I2CCMD_READY) == 0)
   12148 		return -1;
   12149 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   12150 		return -1;
   12151 
   12152 	*data = i2ccmd & 0x00ff;
   12153 
   12154 	return 0;
   12155 }
   12156 
   12157 static uint32_t
   12158 wm_sfp_get_media_type(struct wm_softc *sc)
   12159 {
   12160 	uint32_t ctrl_ext;
   12161 	uint8_t val = 0;
   12162 	int timeout = 3;
   12163 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   12164 	int rv = -1;
   12165 
   12166 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12167 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   12168 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   12169 	CSR_WRITE_FLUSH(sc);
   12170 
   12171 	/* Read SFP module data */
   12172 	while (timeout) {
   12173 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   12174 		if (rv == 0)
   12175 			break;
   12176 		delay(100*1000); /* XXX too big */
   12177 		timeout--;
   12178 	}
   12179 	if (rv != 0)
   12180 		goto out;
   12181 	switch (val) {
   12182 	case SFF_SFP_ID_SFF:
   12183 		aprint_normal_dev(sc->sc_dev,
   12184 		    "Module/Connector soldered to board\n");
   12185 		break;
   12186 	case SFF_SFP_ID_SFP:
   12187 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   12188 		break;
   12189 	case SFF_SFP_ID_UNKNOWN:
   12190 		goto out;
   12191 	default:
   12192 		break;
   12193 	}
   12194 
   12195 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   12196 	if (rv != 0) {
   12197 		goto out;
   12198 	}
   12199 
   12200 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   12201 		mediatype = WM_MEDIATYPE_SERDES;
   12202 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   12203 		sc->sc_flags |= WM_F_SGMII;
   12204 		mediatype = WM_MEDIATYPE_COPPER;
   12205 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   12206 		sc->sc_flags |= WM_F_SGMII;
   12207 		mediatype = WM_MEDIATYPE_SERDES;
   12208 	}
   12209 
   12210 out:
   12211 	/* Restore I2C interface setting */
   12212 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12213 
   12214 	return mediatype;
   12215 }
   12216 
   12217 /*
   12218  * NVM related.
   12219  * Microwire, SPI (w/wo EERD) and Flash.
   12220  */
   12221 
   12222 /* Both spi and uwire */
   12223 
   12224 /*
   12225  * wm_eeprom_sendbits:
   12226  *
   12227  *	Send a series of bits to the EEPROM.
   12228  */
   12229 static void
   12230 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   12231 {
   12232 	uint32_t reg;
   12233 	int x;
   12234 
   12235 	reg = CSR_READ(sc, WMREG_EECD);
   12236 
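	/*
	 * Bit-bang each bit MSB first: present it on DI, then pulse SK
	 * high and low, allowing ~2us of settle time around each edge.
	 */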
   12237 	for (x = nbits; x > 0; x--) {
   12238 		if (bits & (1U << (x - 1)))
   12239 			reg |= EECD_DI;
   12240 		else
   12241 			reg &= ~EECD_DI;
   12242 		CSR_WRITE(sc, WMREG_EECD, reg);
   12243 		CSR_WRITE_FLUSH(sc);
   12244 		delay(2);
   12245 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12246 		CSR_WRITE_FLUSH(sc);
   12247 		delay(2);
   12248 		CSR_WRITE(sc, WMREG_EECD, reg);
   12249 		CSR_WRITE_FLUSH(sc);
   12250 		delay(2);
   12251 	}
   12252 }
   12253 
   12254 /*
   12255  * wm_eeprom_recvbits:
   12256  *
   12257  *	Receive a series of bits from the EEPROM.
   12258  */
   12259 static void
   12260 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   12261 {
   12262 	uint32_t reg, val;
   12263 	int x;
   12264 
   12265 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   12266 
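	/*
	 * Clock the bits out of the device MSB first: raise SK, sample
	 * DO, then lower SK again.
	 */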
   12267 	val = 0;
   12268 	for (x = nbits; x > 0; x--) {
   12269 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   12270 		CSR_WRITE_FLUSH(sc);
   12271 		delay(2);
   12272 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   12273 			val |= (1U << (x - 1));
   12274 		CSR_WRITE(sc, WMREG_EECD, reg);
   12275 		CSR_WRITE_FLUSH(sc);
   12276 		delay(2);
   12277 	}
   12278 	*valp = val;
   12279 }
   12280 
   12281 /* Microwire */
   12282 
   12283 /*
   12284  * wm_nvm_read_uwire:
   12285  *
 *	Read word(s) from the EEPROM using the MicroWire protocol.
   12287  */
   12288 static int
   12289 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12290 {
   12291 	uint32_t reg, val;
   12292 	int i;
   12293 
   12294 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12295 		device_xname(sc->sc_dev), __func__));
   12296 
   12297 	if (sc->nvm.acquire(sc) != 0)
   12298 		return -1;
   12299 
   12300 	for (i = 0; i < wordcnt; i++) {
   12301 		/* Clear SK and DI. */
   12302 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   12303 		CSR_WRITE(sc, WMREG_EECD, reg);
   12304 
   12305 		/*
   12306 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   12307 		 * and Xen.
   12308 		 *
   12309 		 * We use this workaround only for 82540 because qemu's
   12310 		 * e1000 act as 82540.
   12311 		 */
   12312 		if (sc->sc_type == WM_T_82540) {
   12313 			reg |= EECD_SK;
   12314 			CSR_WRITE(sc, WMREG_EECD, reg);
   12315 			reg &= ~EECD_SK;
   12316 			CSR_WRITE(sc, WMREG_EECD, reg);
   12317 			CSR_WRITE_FLUSH(sc);
   12318 			delay(2);
   12319 		}
   12320 		/* XXX: end of workaround */
   12321 
   12322 		/* Set CHIP SELECT. */
   12323 		reg |= EECD_CS;
   12324 		CSR_WRITE(sc, WMREG_EECD, reg);
   12325 		CSR_WRITE_FLUSH(sc);
   12326 		delay(2);
   12327 
   12328 		/* Shift in the READ command. */
   12329 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12330 
   12331 		/* Shift in address. */
   12332 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12333 
   12334 		/* Shift out the data. */
   12335 		wm_eeprom_recvbits(sc, &val, 16);
   12336 		data[i] = val & 0xffff;
   12337 
   12338 		/* Clear CHIP SELECT. */
   12339 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12340 		CSR_WRITE(sc, WMREG_EECD, reg);
   12341 		CSR_WRITE_FLUSH(sc);
   12342 		delay(2);
   12343 	}
   12344 
   12345 	sc->nvm.release(sc);
   12346 	return 0;
   12347 }
   12348 
   12349 /* SPI */
   12350 
   12351 /*
   12352  * Set SPI and FLASH related information from the EECD register.
   12353  * For 82541 and 82547, the word size is taken from EEPROM.
   12354  */
   12355 static int
   12356 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12357 {
   12358 	int size;
   12359 	uint32_t reg;
   12360 	uint16_t data;
   12361 
   12362 	reg = CSR_READ(sc, WMREG_EECD);
   12363 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12364 
   12365 	/* Read the size of NVM from EECD by default */
   12366 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12367 	switch (sc->sc_type) {
   12368 	case WM_T_82541:
   12369 	case WM_T_82541_2:
   12370 	case WM_T_82547:
   12371 	case WM_T_82547_2:
   12372 		/* Set dummy value to access EEPROM */
   12373 		sc->sc_nvm_wordsize = 64;
   12374 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12375 			aprint_error_dev(sc->sc_dev,
   12376 			    "%s: failed to read EEPROM size\n", __func__);
   12377 		}
   12378 		reg = data;
   12379 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12380 		if (size == 0)
   12381 			size = 6; /* 64 word size */
   12382 		else
   12383 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12384 		break;
   12385 	case WM_T_80003:
   12386 	case WM_T_82571:
   12387 	case WM_T_82572:
   12388 	case WM_T_82573: /* SPI case */
   12389 	case WM_T_82574: /* SPI case */
   12390 	case WM_T_82583: /* SPI case */
   12391 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12392 		if (size > 14)
   12393 			size = 14;
   12394 		break;
   12395 	case WM_T_82575:
   12396 	case WM_T_82576:
   12397 	case WM_T_82580:
   12398 	case WM_T_I350:
   12399 	case WM_T_I354:
   12400 	case WM_T_I210:
   12401 	case WM_T_I211:
   12402 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12403 		if (size > 15)
   12404 			size = 15;
   12405 		break;
   12406 	default:
   12407 		aprint_error_dev(sc->sc_dev,
   12408 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12409 		return -1;
   12411 	}
   12412 
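	/* The size value computed above is log2 of the NVM word count. */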
   12413 	sc->sc_nvm_wordsize = 1 << size;
   12414 
   12415 	return 0;
   12416 }
   12417 
   12418 /*
   12419  * wm_nvm_ready_spi:
   12420  *
   12421  *	Wait for a SPI EEPROM to be ready for commands.
   12422  */
   12423 static int
   12424 wm_nvm_ready_spi(struct wm_softc *sc)
   12425 {
   12426 	uint32_t val;
   12427 	int usec;
   12428 
   12429 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12430 		device_xname(sc->sc_dev), __func__));
   12431 
   12432 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12433 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12434 		wm_eeprom_recvbits(sc, &val, 8);
   12435 		if ((val & SPI_SR_RDY) == 0)
   12436 			break;
   12437 	}
   12438 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12440 		return -1;
   12441 	}
   12442 	return 0;
   12443 }
   12444 
   12445 /*
   12446  * wm_nvm_read_spi:
   12447  *
 *	Read word(s) from the EEPROM using the SPI protocol.
   12449  */
   12450 static int
   12451 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12452 {
   12453 	uint32_t reg, val;
   12454 	int i;
   12455 	uint8_t opc;
   12456 	int rv = 0;
   12457 
   12458 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12459 		device_xname(sc->sc_dev), __func__));
   12460 
   12461 	if (sc->nvm.acquire(sc) != 0)
   12462 		return -1;
   12463 
   12464 	/* Clear SK and CS. */
   12465 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12466 	CSR_WRITE(sc, WMREG_EECD, reg);
   12467 	CSR_WRITE_FLUSH(sc);
   12468 	delay(2);
   12469 
   12470 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12471 		goto out;
   12472 
   12473 	/* Toggle CS to flush commands. */
   12474 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12475 	CSR_WRITE_FLUSH(sc);
   12476 	delay(2);
   12477 	CSR_WRITE(sc, WMREG_EECD, reg);
   12478 	CSR_WRITE_FLUSH(sc);
   12479 	delay(2);
   12480 
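	/*
	 * SPI EEPROMs are byte-addressed, so the word index is doubled
	 * below. Parts with 8 address bits encode the ninth address bit
	 * (A8) in the opcode for words at or above 128.
	 */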
   12481 	opc = SPI_OPC_READ;
   12482 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12483 		opc |= SPI_OPC_A8;
   12484 
   12485 	wm_eeprom_sendbits(sc, opc, 8);
   12486 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12487 
   12488 	for (i = 0; i < wordcnt; i++) {
   12489 		wm_eeprom_recvbits(sc, &val, 16);
   12490 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12491 	}
   12492 
   12493 	/* Raise CS and clear SK. */
   12494 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12495 	CSR_WRITE(sc, WMREG_EECD, reg);
   12496 	CSR_WRITE_FLUSH(sc);
   12497 	delay(2);
   12498 
   12499 out:
   12500 	sc->nvm.release(sc);
   12501 	return rv;
   12502 }
   12503 
   12504 /* Using with EERD */
   12505 
   12506 static int
   12507 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12508 {
   12509 	uint32_t attempts = 100000;
   12510 	uint32_t i, reg = 0;
   12511 	int32_t done = -1;
   12512 
   12513 	for (i = 0; i < attempts; i++) {
   12514 		reg = CSR_READ(sc, rw);
   12515 
   12516 		if (reg & EERD_DONE) {
   12517 			done = 0;
   12518 			break;
   12519 		}
   12520 		delay(5);
   12521 	}
   12522 
   12523 	return done;
   12524 }
   12525 
   12526 static int
   12527 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12528 {
   12529 	int i, eerd = 0;
   12530 	int rv = 0;
   12531 
   12532 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12533 		device_xname(sc->sc_dev), __func__));
   12534 
   12535 	if (sc->nvm.acquire(sc) != 0)
   12536 		return -1;
   12537 
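	/*
	 * EERD reads one word per cycle: write the word address with the
	 * START bit set, poll for DONE, then extract the data from the
	 * upper bits of the register.
	 */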
   12538 	for (i = 0; i < wordcnt; i++) {
   12539 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12540 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12541 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12542 		if (rv != 0) {
   12543 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12545 			break;
   12546 		}
   12547 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12548 	}
   12549 
   12550 	sc->nvm.release(sc);
   12551 	return rv;
   12552 }
   12553 
   12554 /* Flash */
   12555 
   12556 static int
   12557 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12558 {
   12559 	uint32_t eecd;
   12560 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12561 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12562 	uint32_t nvm_dword = 0;
   12563 	uint8_t sig_byte = 0;
   12564 	int rv;
   12565 
   12566 	switch (sc->sc_type) {
   12567 	case WM_T_PCH_SPT:
   12568 	case WM_T_PCH_CNP:
   12569 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12570 		act_offset = ICH_NVM_SIG_WORD * 2;
   12571 
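		/*
		 * On SPT/CNP the flash is accessed as dwords; the bank
		 * signature byte is taken from bits 15:8 of the dword
		 * read below.
		 */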
   12572 		/* set bank to 0 in case flash read fails. */
   12573 		*bank = 0;
   12574 
   12575 		/* Check bank 0 */
   12576 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12577 		if (rv != 0)
   12578 			return rv;
   12579 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12580 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12581 			*bank = 0;
   12582 			return 0;
   12583 		}
   12584 
   12585 		/* Check bank 1 */
		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
		    &nvm_dword);
		if (rv != 0)
			return rv;
		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12589 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12590 			*bank = 1;
   12591 			return 0;
   12592 		}
   12593 		aprint_error_dev(sc->sc_dev,
   12594 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12595 		return -1;
   12596 	case WM_T_ICH8:
   12597 	case WM_T_ICH9:
   12598 		eecd = CSR_READ(sc, WMREG_EECD);
   12599 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12600 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12601 			return 0;
   12602 		}
   12603 		/* FALLTHROUGH */
   12604 	default:
   12605 		/* Default to 0 */
   12606 		*bank = 0;
   12607 
   12608 		/* Check bank 0 */
   12609 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12610 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12611 			*bank = 0;
   12612 			return 0;
   12613 		}
   12614 
   12615 		/* Check bank 1 */
   12616 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12617 		    &sig_byte);
   12618 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12619 			*bank = 1;
   12620 			return 0;
   12621 		}
   12622 	}
   12623 
   12624 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12625 		device_xname(sc->sc_dev)));
   12626 	return -1;
   12627 }
   12628 
   12629 /******************************************************************************
   12630  * This function does initial flash setup so that a new read/write/erase cycle
   12631  * can be started.
   12632  *
   12633  * sc - The pointer to the hw structure
   12634  ****************************************************************************/
   12635 static int32_t
   12636 wm_ich8_cycle_init(struct wm_softc *sc)
   12637 {
   12638 	uint16_t hsfsts;
   12639 	int32_t error = 1;
   12640 	int32_t i     = 0;
   12641 
   12642 	if (sc->sc_type >= WM_T_PCH_SPT)
   12643 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12644 	else
   12645 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12646 
	/* Check that the Flash Descriptor Valid bit is set in HW status */
   12648 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12649 		return error;
   12650 
	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   12653 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12654 
   12655 	if (sc->sc_type >= WM_T_PCH_SPT)
   12656 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12657 	else
   12658 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12659 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads 1 after a
	 * hardware reset, which could then be used to tell whether a
	 * cycle is in progress or has completed.  We should also have a
	 * software semaphore mechanism guarding FDONE or the
	 * cycle-in-progress bit, so that accesses by two threads are
	 * serialized and two threads can't start a cycle at the same
	 * time.
	 */
   12670 
   12671 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12672 		/*
   12673 		 * There is no cycle running at present, so we can start a
   12674 		 * cycle
   12675 		 */
   12676 
   12677 		/* Begin by setting Flash Cycle Done. */
   12678 		hsfsts |= HSFSTS_DONE;
   12679 		if (sc->sc_type >= WM_T_PCH_SPT)
   12680 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12681 			    hsfsts & 0xffffUL);
   12682 		else
   12683 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12684 		error = 0;
   12685 	} else {
   12686 		/*
   12687 		 * otherwise poll for sometime so the current cycle has a
   12688 		 * chance to end before giving up.
   12689 		 */
   12690 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12691 			if (sc->sc_type >= WM_T_PCH_SPT)
   12692 				hsfsts = ICH8_FLASH_READ32(sc,
   12693 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12694 			else
   12695 				hsfsts = ICH8_FLASH_READ16(sc,
   12696 				    ICH_FLASH_HSFSTS);
   12697 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12698 				error = 0;
   12699 				break;
   12700 			}
   12701 			delay(1);
   12702 		}
   12703 		if (error == 0) {
			/*
			 * The previous cycle completed within the timeout,
			 * so now set the Flash Cycle Done bit.
			 */
   12708 			hsfsts |= HSFSTS_DONE;
   12709 			if (sc->sc_type >= WM_T_PCH_SPT)
   12710 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12711 				    hsfsts & 0xffffUL);
   12712 			else
   12713 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12714 				    hsfsts);
   12715 		}
   12716 	}
   12717 	return error;
   12718 }
   12719 
   12720 /******************************************************************************
   12721  * This function starts a flash cycle and waits for its completion
   12722  *
   12723  * sc - The pointer to the hw structure
   12724  ****************************************************************************/
   12725 static int32_t
   12726 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12727 {
   12728 	uint16_t hsflctl;
   12729 	uint16_t hsfsts;
   12730 	int32_t error = 1;
   12731 	uint32_t i = 0;
   12732 
   12733 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12734 	if (sc->sc_type >= WM_T_PCH_SPT)
   12735 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12736 	else
   12737 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12738 	hsflctl |= HSFCTL_GO;
   12739 	if (sc->sc_type >= WM_T_PCH_SPT)
   12740 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12741 		    (uint32_t)hsflctl << 16);
   12742 	else
   12743 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12744 
   12745 	/* Wait till FDONE bit is set to 1 */
   12746 	do {
   12747 		if (sc->sc_type >= WM_T_PCH_SPT)
   12748 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12749 			    & 0xffffUL;
   12750 		else
   12751 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12752 		if (hsfsts & HSFSTS_DONE)
   12753 			break;
   12754 		delay(1);
   12755 		i++;
   12756 	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   12758 		error = 0;
   12759 
   12760 	return error;
   12761 }
   12762 
   12763 /******************************************************************************
   12764  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12765  *
   12766  * sc - The pointer to the hw structure
   12767  * index - The index of the byte or word to read.
   12768  * size - Size of data to read, 1=byte 2=word, 4=dword
   12769  * data - Pointer to the word to store the value read.
   12770  *****************************************************************************/
   12771 static int32_t
   12772 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12773     uint32_t size, uint32_t *data)
   12774 {
   12775 	uint16_t hsfsts;
   12776 	uint16_t hsflctl;
   12777 	uint32_t flash_linear_address;
   12778 	uint32_t flash_data = 0;
   12779 	int32_t error = 1;
   12780 	int32_t count = 0;
   12781 
	if (size < 1 || size > 4 || data == NULL ||
   12783 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12784 		return error;
   12785 
   12786 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12787 	    sc->sc_ich8_flash_base;
   12788 
   12789 	do {
   12790 		delay(1);
   12791 		/* Steps */
   12792 		error = wm_ich8_cycle_init(sc);
   12793 		if (error)
   12794 			break;
   12795 
   12796 		if (sc->sc_type >= WM_T_PCH_SPT)
   12797 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12798 			    >> 16;
   12799 		else
   12800 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* BCOUNT is the byte count minus one: 0 = 1 byte, 1 = 2, 3 = 4 */
   12802 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12803 		    & HSFCTL_BCOUNT_MASK;
   12804 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12805 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12806 			/*
   12807 			 * In SPT, This register is in Lan memory space, not
   12808 			 * flash. Therefore, only 32 bit access is supported.
   12809 			 */
   12810 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12811 			    (uint32_t)hsflctl << 16);
   12812 		} else
   12813 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12814 
   12815 		/*
   12816 		 * Write the last 24 bits of index into Flash Linear address
   12817 		 * field in Flash Address
   12818 		 */
   12819 		/* TODO: TBD maybe check the index against the size of flash */
   12820 
   12821 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12822 
   12823 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12824 
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read the result (shift it
		 * in) from Flash Data0, least-significant byte first.
		 */
   12831 		if (error == 0) {
   12832 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12833 			if (size == 1)
   12834 				*data = (uint8_t)(flash_data & 0x000000FF);
   12835 			else if (size == 2)
   12836 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12837 			else if (size == 4)
   12838 				*data = (uint32_t)flash_data;
   12839 			break;
   12840 		} else {
   12841 			/*
   12842 			 * If we've gotten here, then things are probably
   12843 			 * completely hosed, but if the error condition is
   12844 			 * detected, it won't hurt to give it another try...
   12845 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12846 			 */
   12847 			if (sc->sc_type >= WM_T_PCH_SPT)
   12848 				hsfsts = ICH8_FLASH_READ32(sc,
   12849 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12850 			else
   12851 				hsfsts = ICH8_FLASH_READ16(sc,
   12852 				    ICH_FLASH_HSFSTS);
   12853 
   12854 			if (hsfsts & HSFSTS_ERR) {
   12855 				/* Repeat for some time before giving up. */
   12856 				continue;
   12857 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12858 				break;
   12859 		}
   12860 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12861 
   12862 	return error;
   12863 }
   12864 
   12865 /******************************************************************************
   12866  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12867  *
   12868  * sc - pointer to wm_hw structure
   12869  * index - The index of the byte to read.
   12870  * data - Pointer to a byte to store the value read.
   12871  *****************************************************************************/
   12872 static int32_t
   12873 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12874 {
   12875 	int32_t status;
   12876 	uint32_t word = 0;
   12877 
   12878 	status = wm_read_ich8_data(sc, index, 1, &word);
   12879 	if (status == 0)
   12880 		*data = (uint8_t)word;
   12881 	else
   12882 		*data = 0;
   12883 
   12884 	return status;
   12885 }
   12886 
   12887 /******************************************************************************
   12888  * Reads a word from the NVM using the ICH8 flash access registers.
   12889  *
    12890  * sc - pointer to the wm_softc structure
   12891  * index - The starting byte index of the word to read.
   12892  * data - Pointer to a word to store the value read.
   12893  *****************************************************************************/
   12894 static int32_t
   12895 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12896 {
   12897 	int32_t status;
   12898 	uint32_t word = 0;
   12899 
   12900 	status = wm_read_ich8_data(sc, index, 2, &word);
   12901 	if (status == 0)
   12902 		*data = (uint16_t)word;
   12903 	else
   12904 		*data = 0;
   12905 
   12906 	return status;
   12907 }
   12908 
   12909 /******************************************************************************
   12910  * Reads a dword from the NVM using the ICH8 flash access registers.
   12911  *
    12912  * sc - pointer to the wm_softc structure
    12913  * index - The starting byte index of the dword to read.
    12914  * data - Pointer to a dword to store the value read.
   12915  *****************************************************************************/
   12916 static int32_t
   12917 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12918 {
   12919 	int32_t status;
   12920 
   12921 	status = wm_read_ich8_data(sc, index, 4, data);
   12922 	return status;
   12923 }
   12924 
   12925 /******************************************************************************
   12926  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12927  * register.
   12928  *
   12929  * sc - Struct containing variables accessed by shared code
   12930  * offset - offset of word in the EEPROM to read
   12931  * data - word read from the EEPROM
   12932  * words - number of words to read
   12933  *****************************************************************************/
   12934 static int
   12935 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12936 {
   12937 	int32_t	 rv = 0;
   12938 	uint32_t flash_bank = 0;
   12939 	uint32_t act_offset = 0;
   12940 	uint32_t bank_offset = 0;
   12941 	uint16_t word = 0;
   12942 	uint16_t i = 0;
   12943 
   12944 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12945 		device_xname(sc->sc_dev), __func__));
   12946 
   12947 	if (sc->nvm.acquire(sc) != 0)
   12948 		return -1;
   12949 
   12950 	/*
   12951 	 * We need to know which is the valid flash bank.  In the event
   12952 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12953 	 * managing flash_bank. So it cannot be trusted and needs
   12954 	 * to be updated with each read.
   12955 	 */
   12956 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12957 	if (rv) {
   12958 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12959 			device_xname(sc->sc_dev)));
   12960 		flash_bank = 0;
   12961 	}
   12962 
   12963 	/*
    12964 	 * Adjust the offset if we're on bank 1; the bank size is kept
    12965 	 * in words, hence the * 2 to convert to bytes.
   12966 	 */
   12967 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12968 
   12969 	for (i = 0; i < words; i++) {
   12970 		/* The NVM part needs a byte offset, hence * 2 */
   12971 		act_offset = bank_offset + ((offset + i) * 2);
   12972 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12973 		if (rv) {
   12974 			aprint_error_dev(sc->sc_dev,
   12975 			    "%s: failed to read NVM\n", __func__);
   12976 			break;
   12977 		}
   12978 		data[i] = word;
   12979 	}
   12980 
   12981 	sc->nvm.release(sc);
   12982 	return rv;
   12983 }
   12984 
   12985 /******************************************************************************
   12986  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12987  * register.
   12988  *
   12989  * sc - Struct containing variables accessed by shared code
   12990  * offset - offset of word in the EEPROM to read
   12991  * data - word read from the EEPROM
   12992  * words - number of words to read
   12993  *****************************************************************************/
   12994 static int
   12995 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12996 {
   12997 	int32_t	 rv = 0;
   12998 	uint32_t flash_bank = 0;
   12999 	uint32_t act_offset = 0;
   13000 	uint32_t bank_offset = 0;
   13001 	uint32_t dword = 0;
   13002 	uint16_t i = 0;
   13003 
   13004 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13005 		device_xname(sc->sc_dev), __func__));
   13006 
   13007 	if (sc->nvm.acquire(sc) != 0)
   13008 		return -1;
   13009 
   13010 	/*
   13011 	 * We need to know which is the valid flash bank.  In the event
   13012 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   13013 	 * managing flash_bank. So it cannot be trusted and needs
   13014 	 * to be updated with each read.
   13015 	 */
   13016 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   13017 	if (rv) {
   13018 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   13019 			device_xname(sc->sc_dev)));
   13020 		flash_bank = 0;
   13021 	}
   13022 
   13023 	/*
    13024 	 * Adjust the offset if we're on bank 1; the bank size is kept
    13025 	 * in words, hence the * 2 to convert to bytes.
   13026 	 */
   13027 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   13028 
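          	/*
          	 * The SPT flash is only dword-accessible, so each 16-bit
          	 * word is fetched by reading the enclosing dword and picking
          	 * one half.  For example, byte offset 0x12 reads the dword
          	 * at 0x10 and takes bits 31:16.
          	 */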
   13029 	for (i = 0; i < words; i++) {
   13030 		/* The NVM part needs a byte offset, hence * 2 */
   13031 		act_offset = bank_offset + ((offset + i) * 2);
   13032 		/* but we must read dword aligned, so mask ... */
   13033 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   13034 		if (rv) {
   13035 			aprint_error_dev(sc->sc_dev,
   13036 			    "%s: failed to read NVM\n", __func__);
   13037 			break;
   13038 		}
   13039 		/* ... and pick out low or high word */
   13040 		if ((act_offset & 0x2) == 0)
   13041 			data[i] = (uint16_t)(dword & 0xFFFF);
   13042 		else
   13043 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   13044 	}
   13045 
   13046 	sc->nvm.release(sc);
   13047 	return rv;
   13048 }
   13049 
   13050 /* iNVM */
   13051 
   13052 static int
   13053 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   13054 {
    13055 	int32_t	 rv = -1;	/* -1 until the requested word is found */
   13056 	uint32_t invm_dword;
   13057 	uint16_t i;
   13058 	uint8_t record_type, word_address;
   13059 
   13060 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13061 		device_xname(sc->sc_dev), __func__));
   13062 
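          	/*
          	 * The iNVM is a sequence of typed records.  Autoload
          	 * structures are skipped by advancing i past their payload;
          	 * a word-autoload record is matched by its word address.
          	 * rv stays -1 unless the requested word is found.
          	 */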
   13063 	for (i = 0; i < INVM_SIZE; i++) {
   13064 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   13065 		/* Get record type */
   13066 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   13067 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   13068 			break;
   13069 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   13070 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   13071 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   13072 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   13073 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   13074 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   13075 			if (word_address == address) {
   13076 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   13077 				rv = 0;
   13078 				break;
   13079 			}
   13080 		}
   13081 	}
   13082 
   13083 	return rv;
   13084 }
   13085 
   13086 static int
   13087 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   13088 {
   13089 	int rv = 0;
   13090 	int i;
   13091 
   13092 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13093 		device_xname(sc->sc_dev), __func__));
   13094 
   13095 	if (sc->nvm.acquire(sc) != 0)
   13096 		return -1;
   13097 
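          	/*
          	 * Note: except for the MAC address offsets, the cases below
          	 * read "offset" into "data" rather than "offset + i" into
          	 * "&data[i]", which appears to assume single-word reads for
          	 * those fixed offsets.
          	 */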
   13098 	for (i = 0; i < words; i++) {
   13099 		switch (offset + i) {
   13100 		case NVM_OFF_MACADDR:
   13101 		case NVM_OFF_MACADDR1:
   13102 		case NVM_OFF_MACADDR2:
   13103 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   13104 			if (rv != 0) {
   13105 				data[i] = 0xffff;
   13106 				rv = -1;
   13107 			}
   13108 			break;
   13109 		case NVM_OFF_CFG2:
   13110 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13111 			if (rv != 0) {
   13112 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   13113 				rv = 0;
   13114 			}
   13115 			break;
   13116 		case NVM_OFF_CFG4:
   13117 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13118 			if (rv != 0) {
   13119 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   13120 				rv = 0;
   13121 			}
   13122 			break;
   13123 		case NVM_OFF_LED_1_CFG:
   13124 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13125 			if (rv != 0) {
   13126 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   13127 				rv = 0;
   13128 			}
   13129 			break;
   13130 		case NVM_OFF_LED_0_2_CFG:
   13131 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13132 			if (rv != 0) {
   13133 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   13134 				rv = 0;
   13135 			}
   13136 			break;
   13137 		case NVM_OFF_ID_LED_SETTINGS:
   13138 			rv = wm_nvm_read_word_invm(sc, offset, data);
   13139 			if (rv != 0) {
   13140 				*data = ID_LED_RESERVED_FFFF;
   13141 				rv = 0;
   13142 			}
   13143 			break;
   13144 		default:
   13145 			DPRINTF(WM_DEBUG_NVM,
    13146 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
   13147 			*data = NVM_RESERVED_WORD;
   13148 			break;
   13149 		}
   13150 	}
   13151 
   13152 	sc->nvm.release(sc);
   13153 	return rv;
   13154 }
   13155 
   13156 /* Lock, detecting NVM type, validate checksum, version and read */
   13157 
   13158 static int
   13159 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   13160 {
   13161 	uint32_t eecd = 0;
   13162 
   13163 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   13164 	    || sc->sc_type == WM_T_82583) {
   13165 		eecd = CSR_READ(sc, WMREG_EECD);
   13166 
   13167 		/* Isolate bits 15 & 16 */
   13168 		eecd = ((eecd >> 15) & 0x03);
   13169 
   13170 		/* If both bits are set, device is Flash type */
   13171 		if (eecd == 0x03)
   13172 			return 0;
   13173 	}
   13174 	return 1;
   13175 }
   13176 
   13177 static int
   13178 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   13179 {
   13180 	uint32_t eec;
   13181 
   13182 	eec = CSR_READ(sc, WMREG_EEC);
   13183 	if ((eec & EEC_FLASH_DETECTED) != 0)
   13184 		return 1;
   13185 
   13186 	return 0;
   13187 }
   13188 
   13189 /*
   13190  * wm_nvm_validate_checksum
   13191  *
   13192  * The checksum is defined as the sum of the first 64 (16 bit) words.
   13193  */
   13194 static int
   13195 wm_nvm_validate_checksum(struct wm_softc *sc)
   13196 {
   13197 	uint16_t checksum;
   13198 	uint16_t eeprom_data;
   13199 #ifdef WM_DEBUG
   13200 	uint16_t csum_wordaddr, valid_checksum;
   13201 #endif
   13202 	int i;
   13203 
   13204 	checksum = 0;
   13205 
   13206 	/* Don't check for I211 */
   13207 	if (sc->sc_type == WM_T_I211)
   13208 		return 0;
   13209 
   13210 #ifdef WM_DEBUG
   13211 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   13212 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   13213 		csum_wordaddr = NVM_OFF_COMPAT;
   13214 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   13215 	} else {
   13216 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   13217 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   13218 	}
   13219 
   13220 	/* Dump EEPROM image for debug */
   13221 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13222 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13223 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   13224 		/* XXX PCH_SPT? */
   13225 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   13226 		if ((eeprom_data & valid_checksum) == 0) {
   13227 			DPRINTF(WM_DEBUG_NVM,
    13228 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   13229 				device_xname(sc->sc_dev), eeprom_data,
   13230 				    valid_checksum));
   13231 		}
   13232 	}
   13233 
   13234 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   13235 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   13236 		for (i = 0; i < NVM_SIZE; i++) {
   13237 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13238 				printf("XXXX ");
   13239 			else
   13240 				printf("%04hx ", eeprom_data);
   13241 			if (i % 8 == 7)
   13242 				printf("\n");
   13243 		}
   13244 	}
   13245 
   13246 #endif /* WM_DEBUG */
   13247 
   13248 	for (i = 0; i < NVM_SIZE; i++) {
   13249 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   13250 			return 1;
   13251 		checksum += eeprom_data;
   13252 	}
   13253 
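          	/*
          	 * Note: a checksum mismatch is only reported under WM_DEBUG
          	 * and is deliberately not treated as fatal; the sum of the
          	 * first NVM_SIZE words is expected to equal NVM_CHECKSUM
          	 * (0xBABA), but some boards ship otherwise.
          	 */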
   13254 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   13255 #ifdef WM_DEBUG
   13256 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   13257 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   13258 #endif
   13259 	}
   13260 
   13261 	return 0;
   13262 }
   13263 
   13264 static void
   13265 wm_nvm_version_invm(struct wm_softc *sc)
   13266 {
   13267 	uint32_t dword;
   13268 
   13269 	/*
    13270 	 * Linux's code to decode the version is very strange, so we
    13271 	 * don't follow that algorithm and just use word 61 as the
    13272 	 * document describes.  Perhaps it's not perfect though...
   13273 	 *
   13274 	 * Example:
   13275 	 *
   13276 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   13277 	 */
   13278 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   13279 	dword = __SHIFTOUT(dword, INVM_VER_1);
   13280 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   13281 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   13282 }
   13283 
   13284 static void
   13285 wm_nvm_version(struct wm_softc *sc)
   13286 {
   13287 	uint16_t major, minor, build, patch;
   13288 	uint16_t uid0, uid1;
   13289 	uint16_t nvm_data;
   13290 	uint16_t off;
   13291 	bool check_version = false;
   13292 	bool check_optionrom = false;
   13293 	bool have_build = false;
   13294 	bool have_uid = true;
   13295 
   13296 	/*
   13297 	 * Version format:
   13298 	 *
   13299 	 * XYYZ
   13300 	 * X0YZ
   13301 	 * X0YY
   13302 	 *
   13303 	 * Example:
   13304 	 *
   13305 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   13306 	 *	82571	0x50a6	5.10.6?
   13307 	 *	82572	0x506a	5.6.10?
   13308 	 *	82572EI	0x5069	5.6.9?
   13309 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   13310 	 *		0x2013	2.1.3?
    13311 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   13312 	 */
   13313 
   13314 	/*
   13315 	 * XXX
    13316 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    13317 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   13318 	 */
   13319 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   13320 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   13321 		have_uid = false;
   13322 
   13323 	switch (sc->sc_type) {
   13324 	case WM_T_82571:
   13325 	case WM_T_82572:
   13326 	case WM_T_82574:
   13327 	case WM_T_82583:
   13328 		check_version = true;
   13329 		check_optionrom = true;
   13330 		have_build = true;
   13331 		break;
   13332 	case WM_T_82575:
   13333 	case WM_T_82576:
   13334 	case WM_T_82580:
   13335 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13336 			check_version = true;
   13337 		break;
   13338 	case WM_T_I211:
   13339 		wm_nvm_version_invm(sc);
   13340 		have_uid = false;
   13341 		goto printver;
   13342 	case WM_T_I210:
   13343 		if (!wm_nvm_flash_presence_i210(sc)) {
   13344 			wm_nvm_version_invm(sc);
   13345 			have_uid = false;
   13346 			goto printver;
   13347 		}
   13348 		/* FALLTHROUGH */
   13349 	case WM_T_I350:
   13350 	case WM_T_I354:
   13351 		check_version = true;
   13352 		check_optionrom = true;
   13353 		break;
   13354 	default:
   13355 		return;
   13356 	}
   13357 	if (check_version
   13358 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13359 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13360 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13361 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13362 			build = nvm_data & NVM_BUILD_MASK;
   13363 			have_build = true;
   13364 		} else
   13365 			minor = nvm_data & 0x00ff;
   13366 
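          		/*
          		 * For example, nvm_data 0x50a2 (see the table above)
          		 * yields major 5, minor 0x0a and build 2; the BCD-style
          		 * conversion below turns minor 0x0a into decimal 10,
          		 * giving version 5.10.2.
          		 */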
   13367 		/* Decimal */
   13368 		minor = (minor / 16) * 10 + (minor % 16);
   13369 		sc->sc_nvm_ver_major = major;
   13370 		sc->sc_nvm_ver_minor = minor;
   13371 
   13372 printver:
   13373 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13374 		    sc->sc_nvm_ver_minor);
   13375 		if (have_build) {
   13376 			sc->sc_nvm_ver_build = build;
   13377 			aprint_verbose(".%d", build);
   13378 		}
   13379 	}
   13380 
    13381 	/* Assume the Option ROM area is above NVM_SIZE */
   13382 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13383 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13384 		/* Option ROM Version */
   13385 		if ((off != 0x0000) && (off != 0xffff)) {
   13386 			int rv;
   13387 
   13388 			off += NVM_COMBO_VER_OFF;
   13389 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13390 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13391 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13392 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13393 				/* 16bits */
   13394 				major = uid0 >> 8;
   13395 				build = (uid0 << 8) | (uid1 >> 8);
   13396 				patch = uid1 & 0x00ff;
   13397 				aprint_verbose(", option ROM Version %d.%d.%d",
   13398 				    major, build, patch);
   13399 			}
   13400 		}
   13401 	}
   13402 
   13403 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13404 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13405 }
   13406 
   13407 /*
   13408  * wm_nvm_read:
   13409  *
   13410  *	Read data from the serial EEPROM.
   13411  */
   13412 static int
   13413 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13414 {
   13415 	int rv;
   13416 
   13417 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13418 		device_xname(sc->sc_dev), __func__));
   13419 
   13420 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13421 		return -1;
   13422 
   13423 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13424 
   13425 	return rv;
   13426 }
   13427 
   13428 /*
   13429  * Hardware semaphores.
    13430  * Very complex...
   13431  */
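          /*
           * Several mechanisms coexist below: the SWSM SMBI/SWESMBI bits,
           * the SW_FW_SYNC register, the EXTCNFCTR MDIO ownership bit on
           * ICH/PCH parts, and plain software mutexes; which one applies
           * depends on the MAC type.
           */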
   13432 
   13433 static int
   13434 wm_get_null(struct wm_softc *sc)
   13435 {
   13436 
   13437 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13438 		device_xname(sc->sc_dev), __func__));
   13439 	return 0;
   13440 }
   13441 
   13442 static void
   13443 wm_put_null(struct wm_softc *sc)
   13444 {
   13445 
   13446 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13447 		device_xname(sc->sc_dev), __func__));
   13448 	return;
   13449 }
   13450 
   13451 static int
   13452 wm_get_eecd(struct wm_softc *sc)
   13453 {
   13454 	uint32_t reg;
   13455 	int x;
   13456 
   13457 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13458 		device_xname(sc->sc_dev), __func__));
   13459 
   13460 	reg = CSR_READ(sc, WMREG_EECD);
   13461 
   13462 	/* Request EEPROM access. */
   13463 	reg |= EECD_EE_REQ;
   13464 	CSR_WRITE(sc, WMREG_EECD, reg);
   13465 
   13466 	/* ..and wait for it to be granted. */
   13467 	for (x = 0; x < 1000; x++) {
   13468 		reg = CSR_READ(sc, WMREG_EECD);
   13469 		if (reg & EECD_EE_GNT)
   13470 			break;
   13471 		delay(5);
   13472 	}
   13473 	if ((reg & EECD_EE_GNT) == 0) {
   13474 		aprint_error_dev(sc->sc_dev,
   13475 		    "could not acquire EEPROM GNT\n");
   13476 		reg &= ~EECD_EE_REQ;
   13477 		CSR_WRITE(sc, WMREG_EECD, reg);
   13478 		return -1;
   13479 	}
   13480 
   13481 	return 0;
   13482 }
   13483 
   13484 static void
   13485 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13486 {
   13487 
   13488 	*eecd |= EECD_SK;
   13489 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13490 	CSR_WRITE_FLUSH(sc);
   13491 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13492 		delay(1);
   13493 	else
   13494 		delay(50);
   13495 }
   13496 
   13497 static void
   13498 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13499 {
   13500 
   13501 	*eecd &= ~EECD_SK;
   13502 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13503 	CSR_WRITE_FLUSH(sc);
   13504 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13505 		delay(1);
   13506 	else
   13507 		delay(50);
   13508 }
   13509 
   13510 static void
   13511 wm_put_eecd(struct wm_softc *sc)
   13512 {
   13513 	uint32_t reg;
   13514 
   13515 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13516 		device_xname(sc->sc_dev), __func__));
   13517 
   13518 	/* Stop nvm */
   13519 	reg = CSR_READ(sc, WMREG_EECD);
   13520 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13521 		/* Pull CS high */
   13522 		reg |= EECD_CS;
   13523 		wm_nvm_eec_clock_lower(sc, &reg);
   13524 	} else {
   13525 		/* CS on Microwire is active-high */
   13526 		reg &= ~(EECD_CS | EECD_DI);
   13527 		CSR_WRITE(sc, WMREG_EECD, reg);
   13528 		wm_nvm_eec_clock_raise(sc, &reg);
   13529 		wm_nvm_eec_clock_lower(sc, &reg);
   13530 	}
   13531 
   13532 	reg = CSR_READ(sc, WMREG_EECD);
   13533 	reg &= ~EECD_EE_REQ;
   13534 	CSR_WRITE(sc, WMREG_EECD, reg);
   13535 
   13536 	return;
   13537 }
   13538 
   13539 /*
   13540  * Get hardware semaphore.
   13541  * Same as e1000_get_hw_semaphore_generic()
   13542  */
   13543 static int
   13544 wm_get_swsm_semaphore(struct wm_softc *sc)
   13545 {
   13546 	int32_t timeout;
   13547 	uint32_t swsm;
   13548 
   13549 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13550 		device_xname(sc->sc_dev), __func__));
   13551 	KASSERT(sc->sc_nvm_wordsize > 0);
   13552 
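          	/*
          	 * Two-stage handshake: first wait for the hardware-owned
          	 * SMBI bit to clear, then claim SWESMBI by writing it and
          	 * reading it back; we own the firmware semaphore only if
          	 * the bit sticks.
          	 */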
   13553 retry:
   13554 	/* Get the SW semaphore. */
   13555 	timeout = sc->sc_nvm_wordsize + 1;
   13556 	while (timeout) {
   13557 		swsm = CSR_READ(sc, WMREG_SWSM);
   13558 
   13559 		if ((swsm & SWSM_SMBI) == 0)
   13560 			break;
   13561 
   13562 		delay(50);
   13563 		timeout--;
   13564 	}
   13565 
   13566 	if (timeout == 0) {
   13567 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13568 			/*
   13569 			 * In rare circumstances, the SW semaphore may already
   13570 			 * be held unintentionally. Clear the semaphore once
   13571 			 * before giving up.
   13572 			 */
   13573 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13574 			wm_put_swsm_semaphore(sc);
   13575 			goto retry;
   13576 		}
   13577 		aprint_error_dev(sc->sc_dev,
   13578 		    "could not acquire SWSM SMBI\n");
   13579 		return 1;
   13580 	}
   13581 
   13582 	/* Get the FW semaphore. */
   13583 	timeout = sc->sc_nvm_wordsize + 1;
   13584 	while (timeout) {
   13585 		swsm = CSR_READ(sc, WMREG_SWSM);
   13586 		swsm |= SWSM_SWESMBI;
   13587 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13588 		/* If we managed to set the bit we got the semaphore. */
   13589 		swsm = CSR_READ(sc, WMREG_SWSM);
   13590 		if (swsm & SWSM_SWESMBI)
   13591 			break;
   13592 
   13593 		delay(50);
   13594 		timeout--;
   13595 	}
   13596 
   13597 	if (timeout == 0) {
   13598 		aprint_error_dev(sc->sc_dev,
   13599 		    "could not acquire SWSM SWESMBI\n");
   13600 		/* Release semaphores */
   13601 		wm_put_swsm_semaphore(sc);
   13602 		return 1;
   13603 	}
   13604 	return 0;
   13605 }
   13606 
   13607 /*
   13608  * Put hardware semaphore.
   13609  * Same as e1000_put_hw_semaphore_generic()
   13610  */
   13611 static void
   13612 wm_put_swsm_semaphore(struct wm_softc *sc)
   13613 {
   13614 	uint32_t swsm;
   13615 
   13616 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13617 		device_xname(sc->sc_dev), __func__));
   13618 
   13619 	swsm = CSR_READ(sc, WMREG_SWSM);
   13620 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13621 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13622 }
   13623 
   13624 /*
   13625  * Get SW/FW semaphore.
   13626  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13627  */
   13628 static int
   13629 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13630 {
   13631 	uint32_t swfw_sync;
   13632 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13633 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13634 	int timeout;
   13635 
   13636 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13637 		device_xname(sc->sc_dev), __func__));
   13638 
   13639 	if (sc->sc_type == WM_T_80003)
   13640 		timeout = 50;
   13641 	else
   13642 		timeout = 200;
   13643 
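          	/*
          	 * Each resource has a software bit and a firmware bit in
          	 * SW_FW_SYNC.  Under the SWSM semaphore, take the resource
          	 * by setting our SW bit, but only if neither the SW nor the
          	 * FW bit is already set.
          	 */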
   13644 	while (timeout) {
   13645 		if (wm_get_swsm_semaphore(sc)) {
   13646 			aprint_error_dev(sc->sc_dev,
   13647 			    "%s: failed to get semaphore\n",
   13648 			    __func__);
   13649 			return 1;
   13650 		}
   13651 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13652 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13653 			swfw_sync |= swmask;
   13654 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13655 			wm_put_swsm_semaphore(sc);
   13656 			return 0;
   13657 		}
   13658 		wm_put_swsm_semaphore(sc);
   13659 		delay(5000);
   13660 		timeout--;
   13661 	}
   13662 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13663 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13664 	return 1;
   13665 }
   13666 
   13667 static void
   13668 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13669 {
   13670 	uint32_t swfw_sync;
   13671 
   13672 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13673 		device_xname(sc->sc_dev), __func__));
   13674 
   13675 	while (wm_get_swsm_semaphore(sc) != 0)
   13676 		continue;
   13677 
   13678 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13679 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13680 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13681 
   13682 	wm_put_swsm_semaphore(sc);
   13683 }
   13684 
   13685 static int
   13686 wm_get_nvm_80003(struct wm_softc *sc)
   13687 {
   13688 	int rv;
   13689 
   13690 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13691 		device_xname(sc->sc_dev), __func__));
   13692 
   13693 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13694 		aprint_error_dev(sc->sc_dev,
   13695 		    "%s: failed to get semaphore(SWFW)\n",
   13696 		    __func__);
   13697 		return rv;
   13698 	}
   13699 
   13700 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13701 	    && (rv = wm_get_eecd(sc)) != 0) {
   13702 		aprint_error_dev(sc->sc_dev,
   13703 		    "%s: failed to get semaphore(EECD)\n",
   13704 		    __func__);
   13705 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13706 		return rv;
   13707 	}
   13708 
   13709 	return 0;
   13710 }
   13711 
   13712 static void
   13713 wm_put_nvm_80003(struct wm_softc *sc)
   13714 {
   13715 
   13716 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13717 		device_xname(sc->sc_dev), __func__));
   13718 
   13719 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13720 		wm_put_eecd(sc);
   13721 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13722 }
   13723 
   13724 static int
   13725 wm_get_nvm_82571(struct wm_softc *sc)
   13726 {
   13727 	int rv;
   13728 
   13729 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13730 		device_xname(sc->sc_dev), __func__));
   13731 
   13732 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13733 		return rv;
   13734 
   13735 	switch (sc->sc_type) {
   13736 	case WM_T_82573:
   13737 		break;
   13738 	default:
   13739 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13740 			rv = wm_get_eecd(sc);
   13741 		break;
   13742 	}
   13743 
   13744 	if (rv != 0) {
   13745 		aprint_error_dev(sc->sc_dev,
   13746 		    "%s: failed to get semaphore\n",
   13747 		    __func__);
   13748 		wm_put_swsm_semaphore(sc);
   13749 	}
   13750 
   13751 	return rv;
   13752 }
   13753 
   13754 static void
   13755 wm_put_nvm_82571(struct wm_softc *sc)
   13756 {
   13757 
   13758 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13759 		device_xname(sc->sc_dev), __func__));
   13760 
   13761 	switch (sc->sc_type) {
   13762 	case WM_T_82573:
   13763 		break;
   13764 	default:
   13765 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13766 			wm_put_eecd(sc);
   13767 		break;
   13768 	}
   13769 
   13770 	wm_put_swsm_semaphore(sc);
   13771 }
   13772 
   13773 static int
   13774 wm_get_phy_82575(struct wm_softc *sc)
   13775 {
   13776 
   13777 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13778 		device_xname(sc->sc_dev), __func__));
   13779 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13780 }
   13781 
   13782 static void
   13783 wm_put_phy_82575(struct wm_softc *sc)
   13784 {
   13785 
   13786 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13787 		device_xname(sc->sc_dev), __func__));
   13788 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13789 }
   13790 
   13791 static int
   13792 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13793 {
   13794 	uint32_t ext_ctrl;
   13795 	int timeout = 200;
   13796 
   13797 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13798 		device_xname(sc->sc_dev), __func__));
   13799 
   13800 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13801 	for (timeout = 0; timeout < 200; timeout++) {
   13802 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13803 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13804 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13805 
   13806 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13807 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13808 			return 0;
   13809 		delay(5000);
   13810 	}
   13811 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13812 	    device_xname(sc->sc_dev), ext_ctrl);
   13813 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13814 	return 1;
   13815 }
   13816 
   13817 static void
   13818 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13819 {
   13820 	uint32_t ext_ctrl;
   13821 
   13822 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13823 		device_xname(sc->sc_dev), __func__));
   13824 
   13825 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13826 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13827 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13828 
   13829 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13830 }
   13831 
   13832 static int
   13833 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13834 {
   13835 	uint32_t ext_ctrl;
   13836 	int timeout;
   13837 
   13838 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13839 		device_xname(sc->sc_dev), __func__));
   13840 	mutex_enter(sc->sc_ich_phymtx);
   13841 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13842 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13843 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13844 			break;
   13845 		delay(1000);
   13846 	}
   13847 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13848 		printf("%s: SW has already locked the resource\n",
   13849 		    device_xname(sc->sc_dev));
   13850 		goto out;
   13851 	}
   13852 
   13853 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13854 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13855 	for (timeout = 0; timeout < 1000; timeout++) {
   13856 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13857 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13858 			break;
   13859 		delay(1000);
   13860 	}
   13861 	if (timeout >= 1000) {
   13862 		printf("%s: failed to acquire semaphore\n",
   13863 		    device_xname(sc->sc_dev));
   13864 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13865 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13866 		goto out;
   13867 	}
   13868 	return 0;
   13869 
   13870 out:
   13871 	mutex_exit(sc->sc_ich_phymtx);
   13872 	return 1;
   13873 }
   13874 
   13875 static void
   13876 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13877 {
   13878 	uint32_t ext_ctrl;
   13879 
   13880 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13881 		device_xname(sc->sc_dev), __func__));
   13882 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13883 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13884 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13885 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13886 	} else {
   13887 		printf("%s: Semaphore unexpectedly released\n",
   13888 		    device_xname(sc->sc_dev));
   13889 	}
   13890 
   13891 	mutex_exit(sc->sc_ich_phymtx);
   13892 }
   13893 
   13894 static int
   13895 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13896 {
   13897 
   13898 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13899 		device_xname(sc->sc_dev), __func__));
   13900 	mutex_enter(sc->sc_ich_nvmmtx);
   13901 
   13902 	return 0;
   13903 }
   13904 
   13905 static void
   13906 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13907 {
   13908 
   13909 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13910 		device_xname(sc->sc_dev), __func__));
   13911 	mutex_exit(sc->sc_ich_nvmmtx);
   13912 }
   13913 
   13914 static int
   13915 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13916 {
   13917 	int i = 0;
   13918 	uint32_t reg;
   13919 
   13920 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13921 		device_xname(sc->sc_dev), __func__));
   13922 
   13923 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13924 	do {
   13925 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13926 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13927 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13928 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13929 			break;
   13930 		delay(2*1000);
   13931 		i++;
   13932 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13933 
   13934 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13935 		wm_put_hw_semaphore_82573(sc);
   13936 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13937 		    device_xname(sc->sc_dev));
   13938 		return -1;
   13939 	}
   13940 
   13941 	return 0;
   13942 }
   13943 
   13944 static void
   13945 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13946 {
   13947 	uint32_t reg;
   13948 
   13949 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13950 		device_xname(sc->sc_dev), __func__));
   13951 
   13952 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13953 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13954 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13955 }
   13956 
   13957 /*
   13958  * Management mode and power management related subroutines.
   13959  * BMC, AMT, suspend/resume and EEE.
   13960  */
   13961 
   13962 #ifdef WM_WOL
   13963 static int
   13964 wm_check_mng_mode(struct wm_softc *sc)
   13965 {
   13966 	int rv;
   13967 
   13968 	switch (sc->sc_type) {
   13969 	case WM_T_ICH8:
   13970 	case WM_T_ICH9:
   13971 	case WM_T_ICH10:
   13972 	case WM_T_PCH:
   13973 	case WM_T_PCH2:
   13974 	case WM_T_PCH_LPT:
   13975 	case WM_T_PCH_SPT:
   13976 	case WM_T_PCH_CNP:
   13977 		rv = wm_check_mng_mode_ich8lan(sc);
   13978 		break;
   13979 	case WM_T_82574:
   13980 	case WM_T_82583:
   13981 		rv = wm_check_mng_mode_82574(sc);
   13982 		break;
   13983 	case WM_T_82571:
   13984 	case WM_T_82572:
   13985 	case WM_T_82573:
   13986 	case WM_T_80003:
   13987 		rv = wm_check_mng_mode_generic(sc);
   13988 		break;
   13989 	default:
    13990 		/* Nothing to do */
   13991 		rv = 0;
   13992 		break;
   13993 	}
   13994 
   13995 	return rv;
   13996 }
   13997 
   13998 static int
   13999 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   14000 {
   14001 	uint32_t fwsm;
   14002 
   14003 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14004 
   14005 	if (((fwsm & FWSM_FW_VALID) != 0)
   14006 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14007 		return 1;
   14008 
   14009 	return 0;
   14010 }
   14011 
   14012 static int
   14013 wm_check_mng_mode_82574(struct wm_softc *sc)
   14014 {
   14015 	uint16_t data;
   14016 
   14017 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14018 
   14019 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   14020 		return 1;
   14021 
   14022 	return 0;
   14023 }
   14024 
   14025 static int
   14026 wm_check_mng_mode_generic(struct wm_softc *sc)
   14027 {
   14028 	uint32_t fwsm;
   14029 
   14030 	fwsm = CSR_READ(sc, WMREG_FWSM);
   14031 
   14032 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   14033 		return 1;
   14034 
   14035 	return 0;
   14036 }
   14037 #endif /* WM_WOL */
   14038 
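          /*
           * Pass-through is enabled only when TCO reception is on and,
           * depending on the part: the ARC subsystem reports IAMT mode,
           * the 82574/82583 NVM selects pass-through management mode, or
           * SMBus is enabled while ASF is disabled.
           */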
   14039 static int
   14040 wm_enable_mng_pass_thru(struct wm_softc *sc)
   14041 {
   14042 	uint32_t manc, fwsm, factps;
   14043 
   14044 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   14045 		return 0;
   14046 
   14047 	manc = CSR_READ(sc, WMREG_MANC);
   14048 
   14049 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   14050 		device_xname(sc->sc_dev), manc));
   14051 	if ((manc & MANC_RECV_TCO_EN) == 0)
   14052 		return 0;
   14053 
   14054 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   14055 		fwsm = CSR_READ(sc, WMREG_FWSM);
   14056 		factps = CSR_READ(sc, WMREG_FACTPS);
   14057 		if (((factps & FACTPS_MNGCG) == 0)
   14058 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   14059 			return 1;
    14060 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   14061 		uint16_t data;
   14062 
   14063 		factps = CSR_READ(sc, WMREG_FACTPS);
   14064 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   14065 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   14066 			device_xname(sc->sc_dev), factps, data));
   14067 		if (((factps & FACTPS_MNGCG) == 0)
   14068 		    && ((data & NVM_CFG2_MNGM_MASK)
   14069 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   14070 			return 1;
   14071 	} else if (((manc & MANC_SMBUS_EN) != 0)
   14072 	    && ((manc & MANC_ASF_EN) == 0))
   14073 		return 1;
   14074 
   14075 	return 0;
   14076 }
   14077 
   14078 static bool
   14079 wm_phy_resetisblocked(struct wm_softc *sc)
   14080 {
   14081 	bool blocked = false;
   14082 	uint32_t reg;
   14083 	int i = 0;
   14084 
   14085 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14086 		device_xname(sc->sc_dev), __func__));
   14087 
   14088 	switch (sc->sc_type) {
   14089 	case WM_T_ICH8:
   14090 	case WM_T_ICH9:
   14091 	case WM_T_ICH10:
   14092 	case WM_T_PCH:
   14093 	case WM_T_PCH2:
   14094 	case WM_T_PCH_LPT:
   14095 	case WM_T_PCH_SPT:
   14096 	case WM_T_PCH_CNP:
   14097 		do {
   14098 			reg = CSR_READ(sc, WMREG_FWSM);
   14099 			if ((reg & FWSM_RSPCIPHY) == 0) {
   14100 				blocked = true;
   14101 				delay(10*1000);
   14102 				continue;
   14103 			}
   14104 			blocked = false;
   14105 		} while (blocked && (i++ < 30));
   14106 		return blocked;
   14107 		break;
   14108 	case WM_T_82571:
   14109 	case WM_T_82572:
   14110 	case WM_T_82573:
   14111 	case WM_T_82574:
   14112 	case WM_T_82583:
   14113 	case WM_T_80003:
   14114 		reg = CSR_READ(sc, WMREG_MANC);
   14115 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   14116 			return true;
   14117 		else
   14118 			return false;
   14119 		break;
   14120 	default:
   14121 		/* no problem */
   14122 		break;
   14123 	}
   14124 
   14125 	return false;
   14126 }
   14127 
   14128 static void
   14129 wm_get_hw_control(struct wm_softc *sc)
   14130 {
   14131 	uint32_t reg;
   14132 
   14133 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14134 		device_xname(sc->sc_dev), __func__));
   14135 
   14136 	if (sc->sc_type == WM_T_82573) {
   14137 		reg = CSR_READ(sc, WMREG_SWSM);
   14138 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   14139 	} else if (sc->sc_type >= WM_T_82571) {
   14140 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14141 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   14142 	}
   14143 }
   14144 
   14145 static void
   14146 wm_release_hw_control(struct wm_softc *sc)
   14147 {
   14148 	uint32_t reg;
   14149 
   14150 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   14151 		device_xname(sc->sc_dev), __func__));
   14152 
   14153 	if (sc->sc_type == WM_T_82573) {
   14154 		reg = CSR_READ(sc, WMREG_SWSM);
   14155 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   14156 	} else if (sc->sc_type >= WM_T_82571) {
   14157 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14158 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   14159 	}
   14160 }
   14161 
   14162 static void
   14163 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   14164 {
   14165 	uint32_t reg;
   14166 
   14167 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14168 		device_xname(sc->sc_dev), __func__));
   14169 
   14170 	if (sc->sc_type < WM_T_PCH2)
   14171 		return;
   14172 
   14173 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   14174 
   14175 	if (gate)
   14176 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   14177 	else
   14178 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   14179 
   14180 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   14181 }
   14182 
   14183 static int
   14184 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   14185 {
   14186 	uint32_t fwsm, reg;
   14187 	int rv = 0;
   14188 
   14189 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14190 		device_xname(sc->sc_dev), __func__));
   14191 
   14192 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   14193 	wm_gate_hw_phy_config_ich8lan(sc, true);
   14194 
   14195 	/* Disable ULP */
   14196 	wm_ulp_disable(sc);
   14197 
   14198 	/* Acquire PHY semaphore */
   14199 	rv = sc->phy.acquire(sc);
   14200 	if (rv != 0) {
   14201 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14202 		device_xname(sc->sc_dev), __func__));
   14203 		return -1;
   14204 	}
   14205 
   14206 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   14207 	 * inaccessible and resetting the PHY is not blocked, toggle the
   14208 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   14209 	 */
   14210 	fwsm = CSR_READ(sc, WMREG_FWSM);
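          	/*
          	 * The cases below fall through from newest to oldest: LPT
          	 * and newer first try forcing the MAC to SMBus mode, PCH2
          	 * then re-probes the PHY, and plain PCH (without valid
          	 * firmware) toggles LANPHYPC as a last resort.
          	 */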
   14211 	switch (sc->sc_type) {
   14212 	case WM_T_PCH_LPT:
   14213 	case WM_T_PCH_SPT:
   14214 	case WM_T_PCH_CNP:
   14215 		if (wm_phy_is_accessible_pchlan(sc))
   14216 			break;
   14217 
   14218 		/* Before toggling LANPHYPC, see if PHY is accessible by
   14219 		 * forcing MAC to SMBus mode first.
   14220 		 */
   14221 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14222 		reg |= CTRL_EXT_FORCE_SMBUS;
   14223 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14224 #if 0
   14225 		/* XXX Isn't this required??? */
   14226 		CSR_WRITE_FLUSH(sc);
   14227 #endif
   14228 		/* Wait 50 milliseconds for MAC to finish any retries
   14229 		 * that it might be trying to perform from previous
   14230 		 * attempts to acknowledge any phy read requests.
   14231 		 */
   14232 		delay(50 * 1000);
   14233 		/* FALLTHROUGH */
   14234 	case WM_T_PCH2:
   14235 		if (wm_phy_is_accessible_pchlan(sc) == true)
   14236 			break;
   14237 		/* FALLTHROUGH */
   14238 	case WM_T_PCH:
   14239 		if (sc->sc_type == WM_T_PCH)
   14240 			if ((fwsm & FWSM_FW_VALID) != 0)
   14241 				break;
   14242 
   14243 		if (wm_phy_resetisblocked(sc) == true) {
   14244 			printf("XXX reset is blocked(3)\n");
   14245 			break;
   14246 		}
   14247 
   14248 		/* Toggle LANPHYPC Value bit */
   14249 		wm_toggle_lanphypc_pch_lpt(sc);
   14250 
   14251 		if (sc->sc_type >= WM_T_PCH_LPT) {
   14252 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14253 				break;
   14254 
   14255 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   14256 			 * so ensure that the MAC is also out of SMBus mode
   14257 			 */
   14258 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14259 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14260 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14261 
   14262 			if (wm_phy_is_accessible_pchlan(sc) == true)
   14263 				break;
   14264 			rv = -1;
   14265 		}
   14266 		break;
   14267 	default:
   14268 		break;
   14269 	}
   14270 
   14271 	/* Release semaphore */
   14272 	sc->phy.release(sc);
   14273 
   14274 	if (rv == 0) {
   14275 		/* Check to see if able to reset PHY.  Print error if not */
   14276 		if (wm_phy_resetisblocked(sc)) {
   14277 			printf("XXX reset is blocked(4)\n");
   14278 			goto out;
   14279 		}
   14280 
   14281 		/* Reset the PHY before any access to it.  Doing so, ensures
   14282 		 * that the PHY is in a known good state before we read/write
   14283 		 * PHY registers.  The generic reset is sufficient here,
   14284 		 * because we haven't determined the PHY type yet.
   14285 		 */
   14286 		if (wm_reset_phy(sc) != 0)
   14287 			goto out;
   14288 
   14289 		/* On a successful reset, possibly need to wait for the PHY
   14290 		 * to quiesce to an accessible state before returning control
   14291 		 * to the calling function.  If the PHY does not quiesce, then
    14292 		 * return E1000E_BLK_PHY_RESET, as this is the condition
    14293 		 * the PHY is in.
   14294 		 */
   14295 		if (wm_phy_resetisblocked(sc))
    14296 			printf("XXX reset is blocked(5)\n");
   14297 	}
   14298 
   14299 out:
   14300 	/* Ungate automatic PHY configuration on non-managed 82579 */
   14301 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   14302 		delay(10*1000);
   14303 		wm_gate_hw_phy_config_ich8lan(sc, false);
   14304 	}
   14305 
    14306 	return rv;
   14307 }
   14308 
   14309 static void
   14310 wm_init_manageability(struct wm_softc *sc)
   14311 {
   14312 
   14313 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14314 		device_xname(sc->sc_dev), __func__));
   14315 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14316 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   14317 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14318 
   14319 		/* Disable hardware interception of ARP */
   14320 		manc &= ~MANC_ARP_EN;
   14321 
   14322 		/* Enable receiving management packets to the host */
   14323 		if (sc->sc_type >= WM_T_82571) {
   14324 			manc |= MANC_EN_MNG2HOST;
   14325 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14326 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14327 		}
   14328 
   14329 		CSR_WRITE(sc, WMREG_MANC, manc);
   14330 	}
   14331 }
   14332 
   14333 static void
   14334 wm_release_manageability(struct wm_softc *sc)
   14335 {
   14336 
   14337 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14338 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14339 
   14340 		manc |= MANC_ARP_EN;
   14341 		if (sc->sc_type >= WM_T_82571)
   14342 			manc &= ~MANC_EN_MNG2HOST;
   14343 
   14344 		CSR_WRITE(sc, WMREG_MANC, manc);
   14345 	}
   14346 }
   14347 
   14348 static void
   14349 wm_get_wakeup(struct wm_softc *sc)
   14350 {
   14351 
   14352 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14353 	switch (sc->sc_type) {
   14354 	case WM_T_82573:
   14355 	case WM_T_82583:
   14356 		sc->sc_flags |= WM_F_HAS_AMT;
   14357 		/* FALLTHROUGH */
   14358 	case WM_T_80003:
   14359 	case WM_T_82575:
   14360 	case WM_T_82576:
   14361 	case WM_T_82580:
   14362 	case WM_T_I350:
   14363 	case WM_T_I354:
   14364 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14365 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14366 		/* FALLTHROUGH */
   14367 	case WM_T_82541:
   14368 	case WM_T_82541_2:
   14369 	case WM_T_82547:
   14370 	case WM_T_82547_2:
   14371 	case WM_T_82571:
   14372 	case WM_T_82572:
   14373 	case WM_T_82574:
   14374 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14375 		break;
   14376 	case WM_T_ICH8:
   14377 	case WM_T_ICH9:
   14378 	case WM_T_ICH10:
   14379 	case WM_T_PCH:
   14380 	case WM_T_PCH2:
   14381 	case WM_T_PCH_LPT:
   14382 	case WM_T_PCH_SPT:
   14383 	case WM_T_PCH_CNP:
   14384 		sc->sc_flags |= WM_F_HAS_AMT;
   14385 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14386 		break;
   14387 	default:
   14388 		break;
   14389 	}
   14390 
   14391 	/* 1: HAS_MANAGE */
   14392 	if (wm_enable_mng_pass_thru(sc) != 0)
   14393 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14394 
   14395 	/*
    14396 	 * Note that the WOL flags are set after the EEPROM reset
    14397 	 * code has run.
   14398 	 */
   14399 }
   14400 
   14401 /*
   14402  * Unconfigure Ultra Low Power mode.
   14403  * Only for I217 and newer (see below).
   14404  */
   14405 static int
   14406 wm_ulp_disable(struct wm_softc *sc)
   14407 {
   14408 	uint32_t reg;
   14409 	uint16_t phyreg;
   14410 	int i = 0, rv = 0;
   14411 
   14412 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14413 		device_xname(sc->sc_dev), __func__));
   14414 	/* Exclude old devices */
   14415 	if ((sc->sc_type < WM_T_PCH_LPT)
   14416 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14417 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14418 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14419 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14420 		return 0;
   14421 
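          	/*
          	 * If ME firmware is running, ask it to unconfigure ULP via
          	 * the H2ME register and poll for completion; otherwise clear
          	 * ULP by hand through the PHY registers below.
          	 */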
   14422 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14423 		/* Request ME un-configure ULP mode in the PHY */
   14424 		reg = CSR_READ(sc, WMREG_H2ME);
   14425 		reg &= ~H2ME_ULP;
   14426 		reg |= H2ME_ENFORCE_SETTINGS;
   14427 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14428 
   14429 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14430 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14431 			if (i++ == 30) {
   14432 				printf("%s timed out\n", __func__);
   14433 				return -1;
   14434 			}
   14435 			delay(10 * 1000);
   14436 		}
   14437 		reg = CSR_READ(sc, WMREG_H2ME);
   14438 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14439 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14440 
   14441 		return 0;
   14442 	}
   14443 
   14444 	/* Acquire semaphore */
   14445 	rv = sc->phy.acquire(sc);
   14446 	if (rv != 0) {
   14447 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14448 		device_xname(sc->sc_dev), __func__));
   14449 		return -1;
   14450 	}
   14451 
   14452 	/* Toggle LANPHYPC */
   14453 	wm_toggle_lanphypc_pch_lpt(sc);
   14454 
   14455 	/* Unforce SMBus mode in PHY */
   14456 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14457 	if (rv != 0) {
   14458 		uint32_t reg2;
   14459 
   14460 		printf("%s: Force SMBus first.\n", __func__);
   14461 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14462 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14463 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14464 		delay(50 * 1000);
   14465 
   14466 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14467 		    &phyreg);
   14468 		if (rv != 0)
   14469 			goto release;
   14470 	}
   14471 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14472 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14473 
   14474 	/* Unforce SMBus mode in MAC */
   14475 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14476 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14477 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14478 
   14479 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14480 	if (rv != 0)
   14481 		goto release;
   14482 	phyreg |= HV_PM_CTRL_K1_ENA;
   14483 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14484 
   14485 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14486 		&phyreg);
   14487 	if (rv != 0)
   14488 		goto release;
   14489 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14490 	    | I218_ULP_CONFIG1_STICKY_ULP
   14491 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14492 	    | I218_ULP_CONFIG1_WOL_HOST
   14493 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14494 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14495 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14496 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14497 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14498 	phyreg |= I218_ULP_CONFIG1_START;
   14499 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14500 
   14501 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14502 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14503 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14504 
   14505 release:
   14506 	/* Release semaphore */
   14507 	sc->phy.release(sc);
   14508 	wm_gmii_reset(sc);
   14509 	delay(50 * 1000);
   14510 
   14511 	return rv;
   14512 }
   14513 
   14514 /* WOL in the newer chipset interfaces (pchlan) */
   14515 static int
   14516 wm_enable_phy_wakeup(struct wm_softc *sc)
   14517 {
   14518 	device_t dev = sc->sc_dev;
   14519 	uint32_t mreg, moff;
   14520 	uint16_t wuce, wuc, wufc, preg;
   14521 	int i, rv;
   14522 
   14523 	KASSERT(sc->sc_type >= WM_T_PCH);
   14524 
   14525 	/* Copy MAC RARs to PHY RARs */
   14526 	wm_copy_rx_addrs_to_phy_ich8lan(sc);
   14527 
   14528 	/* Activate PHY wakeup */
   14529 	rv = sc->phy.acquire(sc);
   14530 	if (rv != 0) {
   14531 		device_printf(dev, "%s: failed to acquire semaphore\n",
   14532 		    __func__);
   14533 		return rv;
   14534 	}
   14535 
   14536 	/*
   14537 	 * Enable access to PHY wakeup registers.
   14538 	 * BM_MTA, BM_RCTL, BM_WUFC and BM_WUC are in BM_WUC_PAGE.
   14539 	 */
   14540 	rv = wm_enable_phy_wakeup_reg_access_bm(dev, &wuce);
   14541 	if (rv != 0) {
   14542 		device_printf(dev,
   14543 		    "%s: Could not enable PHY wakeup reg access\n", __func__);
   14544 		goto release;
   14545 	}
   14546 
   14547 	/* Copy MAC MTA to PHY MTA */
   14548 	for (i = 0; i < WM_ICH8_MC_TABSIZE; i++) {
   14549 		uint16_t lo, hi;
   14550 
   14551 		mreg = CSR_READ(sc, WMREG_CORDOVA_MTA + (i * 4));
   14552 		lo = (uint16_t)(mreg & 0xffff);
   14553 		hi = (uint16_t)((mreg >> 16) & 0xffff);
   14554 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i), &lo, 0, true);
   14555 		wm_access_phy_wakeup_reg_bm(dev, BM_MTA(i) + 1, &hi, 0, true);
   14556 	}
   14557 
   14558 	/* Configure PHY Rx Control register */
   14559 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 1, true);
   14560 	mreg = CSR_READ(sc, WMREG_RCTL);
   14561 	if (mreg & RCTL_UPE)
   14562 		preg |= BM_RCTL_UPE;
   14563 	if (mreg & RCTL_MPE)
   14564 		preg |= BM_RCTL_MPE;
   14565 	preg &= ~(BM_RCTL_MO_MASK);
   14566 	moff = __SHIFTOUT(mreg, RCTL_MO);
   14567 	if (moff != 0)
   14568 		preg |= moff << BM_RCTL_MO_SHIFT;
   14569 	if (mreg & RCTL_BAM)
   14570 		preg |= BM_RCTL_BAM;
   14571 	if (mreg & RCTL_PMCF)
   14572 		preg |= BM_RCTL_PMCF;
   14573 	mreg = CSR_READ(sc, WMREG_CTRL);
   14574 	if (mreg & CTRL_RFCE)
   14575 		preg |= BM_RCTL_RFCE;
   14576 	wm_access_phy_wakeup_reg_bm(dev, BM_RCTL, &preg, 0, true);
   14577 
   14578 	wuc = WUC_APME | WUC_PME_EN;
   14579 	wufc = WUFC_MAG;
   14580 	/* Enable PHY wakeup in MAC register */
   14581 	CSR_WRITE(sc, WMREG_WUC,
   14582 	    WUC_PHY_WAKE | WUC_PME_STATUS | WUC_APMPME | wuc);
   14583 	CSR_WRITE(sc, WMREG_WUFC, wufc);
   14584 
   14585 	/* Configure and enable PHY wakeup in PHY registers */
   14586 	wm_access_phy_wakeup_reg_bm(dev, BM_WUC, &wuc, 0, true);
   14587 	wm_access_phy_wakeup_reg_bm(dev, BM_WUFC, &wufc, 0, true);
   14588 
   14589 	wuce |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
   14590 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   14591 
   14592 release:
   14593 	sc->phy.release(sc);
   14594 
    14595 	return rv;
   14596 }
   14597 
   14598 /* Power down workaround on D3 */
   14599 static void
   14600 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14601 {
   14602 	uint32_t reg;
   14603 	int i;
   14604 
   14605 	for (i = 0; i < 2; i++) {
   14606 		/* Disable link */
   14607 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14608 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14609 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14610 
   14611 		/*
   14612 		 * Call gig speed drop workaround on Gig disable before
   14613 		 * accessing any PHY registers
   14614 		 */
   14615 		if (sc->sc_type == WM_T_ICH8)
   14616 			wm_gig_downshift_workaround_ich8lan(sc);
   14617 
   14618 		/* Write VR power-down enable */
   14619 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14620 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14621 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14622 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   14623 
   14624 		/* Read it back and test */
   14625 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14626 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14627 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14628 			break;
   14629 
   14630 		/* Issue PHY reset and repeat at most one more time */
   14631 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14632 	}
   14633 }
   14634 
   14635 /*
   14636  *  wm_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14637  *  @sc: pointer to the HW structure
   14638  *
   14639  *  During S0 to Sx transition, it is possible the link remains at gig
   14640  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14641  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14642  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14643  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14644  *  needs to be written.
 *  Parts that support (and are linked to a partner which supports) EEE in
 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
 *  than 10Mbps w/o EEE.
   14648  */
   14649 static void
   14650 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14651 {
   14652 	uint32_t phy_ctrl;
   14653 
   14654 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14655 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14656 
   14657 	if (sc->sc_phytype == WMPHY_I217) {
   14658 		uint16_t devid = sc->sc_pcidevid;
   14659 
   14660 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14661 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14662 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14663 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14664 		    (sc->sc_type >= WM_T_PCH_SPT))
   14665 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14666 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14667 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14668 
   14669 #if 0 /* notyet */
   14670 		if (sc->phy.acquire(sc) != 0)
   14671 			goto out;
   14672 
   14673 		/* XXX Do workaround for EEE */
   14674 
   14675 		/*
   14676 		 * For i217 Intel Rapid Start Technology support,
   14677 		 * when the system is going into Sx and no manageability engine
   14678 		 * is present, the driver must configure proxy to reset only on
   14679 		 * power good.	LPI (Low Power Idle) state must also reset only
   14680 		 * on power good, as well as the MTA (Multicast table array).
   14681 		 * The SMBus release must also be disabled on LCD reset.
   14682 		 */
   14683 
   14684 		/*
   14685 		 * Enable MTA to reset for Intel Rapid Start Technology
   14686 		 * Support
   14687 		 */
   14688 
   14689 		sc->phy.release(sc);
   14690 #endif
   14691 	}
   14692 #if 0
   14693 out:
   14694 #endif
   14695 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14696 
   14697 	if (sc->sc_type == WM_T_ICH8)
   14698 		wm_gig_downshift_workaround_ich8lan(sc);
   14699 
   14700 	if (sc->sc_type >= WM_T_PCH) {
   14701 		wm_oem_bits_config_ich8lan(sc, false);
   14702 
   14703 		/* Reset PHY to activate OEM bits on 82577/8 */
   14704 		if (sc->sc_type == WM_T_PCH)
   14705 			wm_reset_phy(sc);
   14706 
   14707 		if (sc->phy.acquire(sc) != 0)
   14708 			return;
   14709 		wm_write_smbus_addr(sc);
   14710 		sc->phy.release(sc);
   14711 	}
   14712 }
   14713 
   14714 /*
   14715  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14716  *  @sc: pointer to the HW structure
   14717  *
   14718  *  During Sx to S0 transitions on non-managed devices or managed devices
   14719  *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w, toggle the LANPHYPC value to power-cycle
 *  the PHY.
   14722  *  On i217, setup Intel Rapid Start Technology.
   14723  */
   14724 static int
   14725 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14726 {
   14727 	device_t dev = sc->sc_dev;
   14728 	int rv;
   14729 
   14730 	if (sc->sc_type < WM_T_PCH2)
   14731 		return 0;
   14732 
   14733 	rv = wm_init_phy_workarounds_pchlan(sc);
   14734 	if (rv != 0)
   14735 		return -1;
   14736 
	/*
	 * For i217 Intel Rapid Start Technology support: when the system
	 * is transitioning from Sx and no manageability engine is present,
	 * configure SMBus to restore on reset, disable proxy, and enable
	 * the reset on MTA (Multicast table array).
	 */
   14742 	if (sc->sc_phytype == WMPHY_I217) {
   14743 		uint16_t phy_reg;
   14744 
   14745 		if (sc->phy.acquire(sc) != 0)
   14746 			return -1;
   14747 
   14748 		/* Clear Auto Enable LPI after link up */
   14749 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14750 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14751 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14752 
   14753 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			/*
			 * Restore clear on SMB if no manageability engine
			 * is present.
			 */
   14757 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
   14758 			    &phy_reg);
   14759 			if (rv != 0)
   14760 				goto release;
   14761 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14762 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14763 
   14764 			/* Disable Proxy */
   14765 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14766 		}
   14767 		/* Enable reset on MTA */
		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14769 		if (rv != 0)
   14770 			goto release;
   14771 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14772 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14773 
   14774 release:
   14775 		sc->phy.release(sc);
   14776 		return rv;
   14777 	}
   14778 
   14779 	return 0;
   14780 }
   14781 
   14782 static void
   14783 wm_enable_wakeup(struct wm_softc *sc)
   14784 {
   14785 	uint32_t reg, pmreg;
   14786 	pcireg_t pmode;
   14787 	int rv = 0;
   14788 
   14789 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14790 		device_xname(sc->sc_dev), __func__));
   14791 
   14792 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14793 	    &pmreg, NULL) == 0)
   14794 		return;
   14795 
   14796 	if ((sc->sc_flags & WM_F_WOL) == 0)
   14797 		goto pme;
   14798 
   14799 	/* Advertise the wakeup capability */
   14800 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14801 	    | CTRL_SWDPIN(3));
   14802 
   14803 	/* Keep the laser running on fiber adapters */
   14804 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14805 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14806 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14807 		reg |= CTRL_EXT_SWDPIN(3);
   14808 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14809 	}
   14810 
   14811 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   14812 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH) ||
   14813 	    (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT) ||
   14814 	    (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   14815 		wm_suspend_workarounds_ich8lan(sc);
   14816 
   14817 #if 0	/* for the multicast packet */
   14818 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14819 	reg |= WUFC_MC;
   14820 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14821 #endif
   14822 
   14823 	if (sc->sc_type >= WM_T_PCH) {
   14824 		rv = wm_enable_phy_wakeup(sc);
   14825 		if (rv != 0)
   14826 			goto pme;
   14827 	} else {
   14828 		/* Enable wakeup by the MAC */
   14829 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   14830 		CSR_WRITE(sc, WMREG_WUFC, WUFC_MAG);
   14831 	}
   14832 
   14833 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14834 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14835 		|| (sc->sc_type == WM_T_PCH2))
   14836 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14837 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14838 
   14839 pme:
   14840 	/* Request PME */
   14841 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14842 	if ((rv == 0) && (sc->sc_flags & WM_F_WOL) != 0) {
   14843 		/* For WOL */
   14844 		pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14845 	} else {
   14846 		/* Disable WOL */
   14847 		pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14848 	}
   14849 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14850 }
   14851 
   14852 /* Disable ASPM L0s and/or L1 for workaround */
   14853 static void
   14854 wm_disable_aspm(struct wm_softc *sc)
   14855 {
   14856 	pcireg_t reg, mask = 0;
	const char *str = "";
   14858 
	/*
	 * Only for PCIe devices which have the PCIe capability in the PCI
	 * config space.
	 */
   14863 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14864 		return;
   14865 
   14866 	switch (sc->sc_type) {
   14867 	case WM_T_82571:
   14868 	case WM_T_82572:
   14869 		/*
   14870 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14871 		 * State Power management L1 State (ASPM L1).
   14872 		 */
   14873 		mask = PCIE_LCSR_ASPM_L1;
   14874 		str = "L1 is";
   14875 		break;
   14876 	case WM_T_82573:
   14877 	case WM_T_82574:
   14878 	case WM_T_82583:
   14879 		/*
   14880 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14881 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The documents for the 82574 and 82583 say
		 * that disabling L0s with some specific chipsets is
		 * sufficient, but we follow what the Intel em driver does.
   14886 		 *
   14887 		 * References:
   14888 		 * Errata 8 of the Specification Update of i82573.
   14889 		 * Errata 20 of the Specification Update of i82574.
   14890 		 * Errata 9 of the Specification Update of i82583.
   14891 		 */
   14892 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14893 		str = "L0s and L1 are";
   14894 		break;
   14895 	default:
   14896 		return;
   14897 	}
   14898 
   14899 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14900 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14901 	reg &= ~mask;
   14902 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14903 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14904 
   14905 	/* Print only in wm_attach() */
   14906 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14907 		aprint_verbose_dev(sc->sc_dev,
   14908 		    "ASPM %s disabled to workaround the errata.\n", str);
   14909 }
   14910 
   14911 /* LPLU */
   14912 
   14913 static void
   14914 wm_lplu_d0_disable(struct wm_softc *sc)
   14915 {
   14916 	struct mii_data *mii = &sc->sc_mii;
   14917 	uint32_t reg;
   14918 
   14919 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14920 		device_xname(sc->sc_dev), __func__));
   14921 
   14922 	if (sc->sc_phytype == WMPHY_IFE)
   14923 		return;
   14924 
   14925 	switch (sc->sc_type) {
   14926 	case WM_T_82571:
   14927 	case WM_T_82572:
   14928 	case WM_T_82573:
   14929 	case WM_T_82575:
   14930 	case WM_T_82576:
   14931 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14932 		reg &= ~PMR_D0_LPLU;
   14933 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14934 		break;
   14935 	case WM_T_82580:
   14936 	case WM_T_I350:
   14937 	case WM_T_I210:
   14938 	case WM_T_I211:
   14939 		reg = CSR_READ(sc, WMREG_PHPM);
   14940 		reg &= ~PHPM_D0A_LPLU;
   14941 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14942 		break;
   14943 	case WM_T_82574:
   14944 	case WM_T_82583:
   14945 	case WM_T_ICH8:
   14946 	case WM_T_ICH9:
   14947 	case WM_T_ICH10:
   14948 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14949 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14950 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14951 		CSR_WRITE_FLUSH(sc);
   14952 		break;
   14953 	case WM_T_PCH:
   14954 	case WM_T_PCH2:
   14955 	case WM_T_PCH_LPT:
   14956 	case WM_T_PCH_SPT:
   14957 	case WM_T_PCH_CNP:
   14958 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14959 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14960 		if (wm_phy_resetisblocked(sc) == false)
   14961 			reg |= HV_OEM_BITS_ANEGNOW;
   14962 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14963 		break;
   14964 	default:
   14965 		break;
   14966 	}
   14967 }
   14968 
   14969 /* EEE */
   14970 
   14971 static int
   14972 wm_set_eee_i350(struct wm_softc *sc)
   14973 {
   14974 	struct ethercom *ec = &sc->sc_ethercom;
   14975 	uint32_t ipcnfg, eeer;
   14976 	uint32_t ipcnfg_mask
   14977 	    = IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN | IPCNFG_10BASE_TE;
   14978 	uint32_t eeer_mask = EEER_TX_LPI_EN | EEER_RX_LPI_EN | EEER_LPI_FC;
   14979 
   14980 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14981 	eeer = CSR_READ(sc, WMREG_EEER);
   14982 
   14983 	/* enable or disable per user setting */
   14984 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   14985 		ipcnfg |= ipcnfg_mask;
   14986 		eeer |= eeer_mask;
   14987 	} else {
   14988 		ipcnfg &= ~ipcnfg_mask;
   14989 		eeer &= ~eeer_mask;
   14990 	}
   14991 
   14992 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14993 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14994 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14995 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14996 
   14997 	return 0;
   14998 }
   14999 
   15000 static int
   15001 wm_set_eee_pchlan(struct wm_softc *sc)
   15002 {
   15003 	device_t dev = sc->sc_dev;
   15004 	struct ethercom *ec = &sc->sc_ethercom;
   15005 	uint16_t lpa, pcs_status, adv_addr, adv, lpi_ctrl, data;
   15006 	int rv = 0;
   15007 
   15008 	switch (sc->sc_phytype) {
   15009 	case WMPHY_82579:
   15010 		lpa = I82579_EEE_LP_ABILITY;
   15011 		pcs_status = I82579_EEE_PCS_STATUS;
   15012 		adv_addr = I82579_EEE_ADVERTISEMENT;
   15013 		break;
   15014 	case WMPHY_I217:
   15015 		lpa = I217_EEE_LP_ABILITY;
   15016 		pcs_status = I217_EEE_PCS_STATUS;
   15017 		adv_addr = I217_EEE_ADVERTISEMENT;
   15018 		break;
   15019 	default:
   15020 		return 0;
   15021 	}
   15022 
   15023 	if (sc->phy.acquire(sc)) {
   15024 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   15025 		return 0;
   15026 	}
   15027 
   15028 	rv = sc->phy.readreg_locked(dev, 1, I82579_LPI_CTRL, &lpi_ctrl);
   15029 	if (rv != 0)
   15030 		goto release;
   15031 
   15032 	/* Clear bits that enable EEE in various speeds */
   15033 	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE;
   15034 
   15035 	if ((ec->ec_capenable & ETHERCAP_EEE) != 0) {
   15036 		/* Save off link partner's EEE ability */
   15037 		rv = wm_read_emi_reg_locked(dev, lpa, &sc->eee_lp_ability);
   15038 		if (rv != 0)
   15039 			goto release;
   15040 
   15041 		/* Read EEE advertisement */
   15042 		if ((rv = wm_read_emi_reg_locked(dev, adv_addr, &adv)) != 0)
   15043 			goto release;
   15044 
   15045 		/*
   15046 		 * Enable EEE only for speeds in which the link partner is
   15047 		 * EEE capable and for which we advertise EEE.
   15048 		 */
   15049 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_1000_T)
   15050 			lpi_ctrl |= I82579_LPI_CTRL_EN_1000;
   15051 		if (adv & sc->eee_lp_ability & AN_EEEADVERT_100_TX) {
   15052 			sc->phy.readreg_locked(dev, 2, MII_ANLPAR, &data);
   15053 			if ((data & ANLPAR_TX_FD) != 0)
   15054 				lpi_ctrl |= I82579_LPI_CTRL_EN_100;
   15055 			else {
   15056 				/*
   15057 				 * EEE is not supported in 100Half, so ignore
   15058 				 * partner's EEE in 100 ability if full-duplex
   15059 				 * is not advertised.
   15060 				 */
   15061 				sc->eee_lp_ability
   15062 				    &= ~AN_EEEADVERT_100_TX;
   15063 			}
   15064 		}
   15065 	}
   15066 
   15067 	if (sc->sc_phytype == WMPHY_82579) {
   15068 		rv = wm_read_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, &data);
   15069 		if (rv != 0)
   15070 			goto release;
   15071 
   15072 		data &= ~I82579_LPI_PLL_SHUT_100;
   15073 		rv = wm_write_emi_reg_locked(dev, I82579_LPI_PLL_SHUT, data);
   15074 	}
   15075 
   15076 	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
   15077 	if ((rv = wm_read_emi_reg_locked(dev, pcs_status, &data)) != 0)
   15078 		goto release;
   15079 
   15080 	rv = sc->phy.writereg_locked(dev, 1, I82579_LPI_CTRL, lpi_ctrl);
   15081 release:
   15082 	sc->phy.release(sc);
   15083 
   15084 	return rv;
   15085 }
   15086 
   15087 static int
   15088 wm_set_eee(struct wm_softc *sc)
   15089 {
   15090 	struct ethercom *ec = &sc->sc_ethercom;
   15091 
   15092 	if ((ec->ec_capabilities & ETHERCAP_EEE) == 0)
   15093 		return 0;
   15094 
   15095 	if (sc->sc_type == WM_T_I354) {
   15096 		/* I354 uses an external PHY */
   15097 		return 0; /* not yet */
   15098 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   15099 		return wm_set_eee_i350(sc);
   15100 	else if (sc->sc_type >= WM_T_PCH2)
   15101 		return wm_set_eee_pchlan(sc);
   15102 
   15103 	return 0;
   15104 }
   15105 
   15106 /*
   15107  * Workarounds (mainly PHY related).
   15108  * Basically, PHY's workarounds are in the PHY drivers.
   15109  */
   15110 
   15111 /* Work-around for 82566 Kumeran PCS lock loss */
   15112 static void
   15113 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   15114 {
   15115 	struct mii_data *mii = &sc->sc_mii;
   15116 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15117 	int i;
   15118 	int reg;
   15119 
   15120 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15121 		device_xname(sc->sc_dev), __func__));
   15122 
   15123 	/* If the link is not up, do nothing */
   15124 	if ((status & STATUS_LU) == 0)
   15125 		return;
   15126 
	/* Nothing to do if the link speed is not 1Gbps */
   15128 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   15129 		return;
   15130 
   15131 	for (i = 0; i < 10; i++) {
   15132 		/* read twice */
   15133 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   15134 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   15135 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   15136 			goto out;	/* GOOD! */
   15137 
   15138 		/* Reset the PHY */
   15139 		wm_reset_phy(sc);
   15140 		delay(5*1000);
   15141 	}
   15142 
   15143 	/* Disable GigE link negotiation */
   15144 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   15145 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   15146 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   15147 
   15148 	/*
   15149 	 * Call gig speed drop workaround on Gig disable before accessing
   15150 	 * any PHY registers.
   15151 	 */
   15152 	wm_gig_downshift_workaround_ich8lan(sc);
   15153 
   15154 out:
   15155 	return;
   15156 }
   15157 
   15158 /*
   15159  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   15160  *  @sc: pointer to the HW structure
   15161  *
 *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   15163  *  LPLU, Gig disable, MDIC PHY reset):
   15164  *    1) Set Kumeran Near-end loopback
   15165  *    2) Clear Kumeran Near-end loopback
   15166  *  Should only be called for ICH8[m] devices with any 1G Phy.
   15167  */
   15168 static void
   15169 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   15170 {
   15171 	uint16_t kmreg;
   15172 
   15173 	/* Only for igp3 */
   15174 	if (sc->sc_phytype == WMPHY_IGP_3) {
   15175 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   15176 			return;
   15177 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   15178 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   15179 			return;
   15180 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   15181 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   15182 	}
   15183 }
   15184 
   15185 /*
   15186  * Workaround for pch's PHYs
   15187  * XXX should be moved to new PHY driver?
   15188  */
   15189 static void
   15190 wm_hv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15191 {
   15192 
   15193 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15194 		device_xname(sc->sc_dev), __func__));
   15195 	KASSERT(sc->sc_type == WM_T_PCH);
   15196 
   15197 	if (sc->sc_phytype == WMPHY_82577)
   15198 		wm_set_mdio_slow_mode_hv(sc);
   15199 
   15200 	/* XXX (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   15201 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   15203 
   15204 	/* 82578 */
   15205 	if (sc->sc_phytype == WMPHY_82578) {
   15206 		struct mii_softc *child;
   15207 
   15208 		/*
   15209 		 * Return registers to default by doing a soft reset then
   15210 		 * writing 0x3140 to the control register
   15211 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   15212 		 */
   15213 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15214 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   15215 			PHY_RESET(child);
   15216 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   15217 			    0x3140);
   15218 		}
   15219 	}
   15220 
   15221 	/* Select page 0 */
   15222 	sc->phy.acquire(sc);
   15223 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   15224 	sc->phy.release(sc);
   15225 
   15226 	/*
   15227 	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if the link is at 1Gbps.
   15229 	 */
   15230 	wm_k1_gig_workaround_hv(sc, 1);
   15231 }
   15232 
   15233 /*
   15234  *  wm_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
   15235  *  @sc:   pointer to the HW structure
   15236  */
   15237 static void
   15238 wm_copy_rx_addrs_to_phy_ich8lan(struct wm_softc *sc)
   15239 {
   15240 	device_t dev = sc->sc_dev;
   15241 	uint32_t mac_reg;
   15242 	uint16_t i, wuce;
   15243 	int count;
   15244 
   15245 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15246 		device_xname(sc->sc_dev), __func__));
   15247 
   15248 	if (sc->phy.acquire(sc) != 0)
   15249 		return;
   15250 	if (wm_enable_phy_wakeup_reg_access_bm(dev, &wuce) != 0)
   15251 		goto release;
   15252 
   15253 	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
   15254 	count = wm_rar_count(sc);
   15255 	for (i = 0; i < count; i++) {
   15256 		uint16_t lo, hi;
   15257 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAL(i));
   15258 		lo = (uint16_t)(mac_reg & 0xffff);
   15259 		hi = (uint16_t)((mac_reg >> 16) & 0xffff);
   15260 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_L(i), &lo, 0, true);
   15261 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_M(i), &hi, 0, true);
   15262 
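		/* For RAH, only the Address Valid bit is copied to the PHY */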
   15263 		mac_reg = CSR_READ(sc, WMREG_CORDOVA_RAH(i));
   15264 		lo = (uint16_t)(mac_reg & 0xffff);
   15265 		hi = (uint16_t)((mac_reg & RAL_AV) >> 16);
   15266 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_H(i), &lo, 0, true);
   15267 		wm_access_phy_wakeup_reg_bm(dev, BM_RAR_CTRL(i), &hi, 0, true);
   15268 	}
   15269 
   15270 	wm_disable_phy_wakeup_reg_access_bm(dev, &wuce);
   15271 
   15272 release:
   15273 	sc->phy.release(sc);
   15274 }
   15275 
   15276 /*
   15277  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   15278  *  done after every PHY reset.
   15279  */
   15280 static void
   15281 wm_lv_phy_workarounds_ich8lan(struct wm_softc *sc)
   15282 {
   15283 
   15284 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15285 		device_xname(sc->sc_dev), __func__));
   15286 	KASSERT(sc->sc_type == WM_T_PCH2);
   15287 
   15288 	/* Set MDIO slow mode before any other MDIO access */
   15289 	wm_set_mdio_slow_mode_hv(sc);
   15290 
   15291 	/* XXX set MSE higher to enable link to stay up when noise is high */
   15292 	/* XXX drop link after 5 times MSE threshold was reached */
   15293 }
   15294 
   15295 /**
   15296  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   15297  *  @link: link up bool flag
   15298  *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion
 *  indications, preventing further DMA write requests.  Work around the
 *  issue by disabling the de-assertion of the clock request when in
 *  1Gbps mode.
   15302  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   15303  *  speeds in order to avoid Tx hangs.
   15304  **/
   15305 static int
   15306 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   15307 {
   15308 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   15309 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   15310 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   15311 	uint16_t phyreg;
   15312 
   15313 	if (link && (speed == STATUS_SPEED_1000)) {
   15314 		sc->phy.acquire(sc);
   15315 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15316 		    &phyreg);
   15317 		if (rv != 0)
   15318 			goto release;
   15319 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15320 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   15321 		if (rv != 0)
   15322 			goto release;
   15323 		delay(20);
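		/* Keep the PLL clock request asserted while at 1Gbps */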
   15324 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   15325 
   15326 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   15327 		    &phyreg);
   15328 release:
   15329 		sc->phy.release(sc);
   15330 		return rv;
   15331 	}
   15332 
   15333 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   15334 
   15335 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   15336 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   15337 	    || !link
   15338 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   15339 		goto update_fextnvm6;
   15340 
   15341 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL);
   15342 
   15343 	/* Clear link status transmit timeout */
   15344 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   15345 	if (speed == STATUS_SPEED_100) {
   15346 		/* Set inband Tx timeout to 5x10us for 100Half */
   15347 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15348 
   15349 		/* Do not extend the K1 entry latency for 100Half */
   15350 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15351 	} else {
   15352 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   15353 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   15354 
   15355 		/* Extend the K1 entry latency for 10 Mbps */
   15356 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   15357 	}
   15358 
   15359 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   15360 
   15361 update_fextnvm6:
   15362 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   15363 	return 0;
   15364 }
   15365 
   15366 /*
   15367  *  wm_k1_gig_workaround_hv - K1 Si workaround
   15368  *  @sc:   pointer to the HW structure
   15369  *  @link: link up bool flag
   15370  *
   15371  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig.
   15373  *  If link is down, the function will restore the default K1 setting located
   15374  *  in the NVM.
   15375  */
   15376 static int
   15377 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   15378 {
   15379 	int k1_enable = sc->sc_nvm_k1_enabled;
   15380 
   15381 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15382 		device_xname(sc->sc_dev), __func__));
   15383 
   15384 	if (sc->phy.acquire(sc) != 0)
   15385 		return -1;
   15386 
   15387 	if (link) {
   15388 		k1_enable = 0;
   15389 
   15390 		/* Link stall fix for link up */
   15391 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15392 		    0x0100);
   15393 	} else {
   15394 		/* Link stall fix for link down */
   15395 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   15396 		    0x4100);
   15397 	}
   15398 
   15399 	wm_configure_k1_ich8lan(sc, k1_enable);
   15400 	sc->phy.release(sc);
   15401 
   15402 	return 0;
   15403 }
   15404 
   15405 /*
   15406  *  wm_k1_workaround_lv - K1 Si workaround
   15407  *  @sc:   pointer to the HW structure
   15408  *
 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
 *  Disable K1 for 1000 and 100 speeds.
   15411  */
   15412 static int
   15413 wm_k1_workaround_lv(struct wm_softc *sc)
   15414 {
   15415 	uint32_t reg;
   15416 	int phyreg;
   15417 
   15418 	if (sc->sc_type != WM_T_PCH2)
   15419 		return 0;
   15420 
   15421 	/* Set K1 beacon duration based on 10Mbps speed */
   15422 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS);
   15423 
   15424 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   15425 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   15426 		if (phyreg &
   15427 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			/* LV 1G/100 packet drop issue workaround */
   15429 			phyreg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL);
   15430 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   15431 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL, phyreg);
   15432 		} else {
   15433 			/* For 10Mbps */
   15434 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   15435 			reg &= ~FEXTNVM4_BEACON_DURATION;
   15436 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   15437 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   15438 		}
   15439 	}
   15440 
   15441 	return 0;
   15442 }
   15443 
   15444 /*
   15445  *  wm_link_stall_workaround_hv - Si workaround
   15446  *  @sc: pointer to the HW structure
   15447  *
   15448  *  This function works around a Si bug where the link partner can get
   15449  *  a link up indication before the PHY does. If small packets are sent
   15450  *  by the link partner they can be placed in the packet buffer without
 *  being properly accounted for by the PHY and will stall, preventing
   15452  *  further packets from being received.  The workaround is to clear the
   15453  *  packet buffer after the PHY detects link up.
   15454  */
   15455 static int
   15456 wm_link_stall_workaround_hv(struct wm_softc *sc)
   15457 {
   15458 	int phyreg;
   15459 
   15460 	if (sc->sc_phytype != WMPHY_82578)
   15461 		return 0;
   15462 
	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR);
   15465 	if ((phyreg & BMCR_LOOP) != 0)
   15466 		return 0;
   15467 
   15468 	/* check if link is up and at 1Gbps */
   15469 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS);
   15470 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15471 	    | BM_CS_STATUS_SPEED_MASK;
   15472 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   15473 		| BM_CS_STATUS_SPEED_1000))
   15474 		return 0;
   15475 
   15476 	delay(200 * 1000);	/* XXX too big */
   15477 
   15478 	/* flush the packets in the fifo buffer */
   15479 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15480 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   15481 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   15482 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   15483 
   15484 	return 0;
   15485 }
   15486 
   15487 static void
   15488 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   15489 {
   15490 	uint32_t reg;
   15491 
   15492 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   15493 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   15494 	    reg | HV_KMRN_MDIO_SLOW);
   15495 }
   15496 
   15497 /*
   15498  *  wm_configure_k1_ich8lan - Configure K1 power state
   15499  *  @sc: pointer to the HW structure
   15500  *  @enable: K1 state to configure
   15501  *
   15502  *  Configure the K1 power state based on the provided parameter.
   15503  *  Assumes semaphore already acquired.
   15504  */
   15505 static void
   15506 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   15507 {
   15508 	uint32_t ctrl, ctrl_ext, tmp;
   15509 	uint16_t kmreg;
   15510 	int rv;
   15511 
   15512 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15513 
   15514 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   15515 	if (rv != 0)
   15516 		return;
   15517 
   15518 	if (k1_enable)
   15519 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   15520 	else
   15521 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   15522 
   15523 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   15524 	if (rv != 0)
   15525 		return;
   15526 
   15527 	delay(20);
   15528 
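	/*
	 * Briefly force the MAC speed (with the speed-select bypass set)
	 * so that the new K1 setting takes effect, then restore the
	 * original CTRL and CTRL_EXT values.
	 */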
   15529 	ctrl = CSR_READ(sc, WMREG_CTRL);
   15530 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   15531 
   15532 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   15533 	tmp |= CTRL_FRCSPD;
   15534 
   15535 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   15536 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   15537 	CSR_WRITE_FLUSH(sc);
   15538 	delay(20);
   15539 
   15540 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   15541 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   15542 	CSR_WRITE_FLUSH(sc);
   15543 	delay(20);
   15544 
   15545 	return;
   15546 }
   15547 
/* Special case - the 82575 needs manual init ... */
   15549 static void
   15550 wm_reset_init_script_82575(struct wm_softc *sc)
   15551 {
   15552 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   15555 	 */
   15556 
   15557 	/* SerDes configuration via SERDESCTRL */
   15558 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15559 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15560 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15561 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15562 
   15563 	/* CCM configuration via CCMCTL register */
   15564 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15565 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15566 
   15567 	/* PCIe lanes configuration */
   15568 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15569 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15570 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15571 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15572 
   15573 	/* PCIe PLL Configuration */
   15574 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15575 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15576 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15577 }
   15578 
   15579 static void
   15580 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15581 {
   15582 	uint32_t reg;
   15583 	uint16_t nvmword;
   15584 	int rv;
   15585 
   15586 	if (sc->sc_type != WM_T_82580)
   15587 		return;
   15588 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15589 		return;
   15590 
   15591 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15592 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15593 	if (rv != 0) {
   15594 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15595 		    __func__);
   15596 		return;
   15597 	}
   15598 
   15599 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15600 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15601 		reg |= MDICNFG_DEST;
   15602 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15603 		reg |= MDICNFG_COM_MDIO;
   15604 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15605 }
   15606 
   15607 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   15608 
   15609 static bool
   15610 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15611 {
   15612 	uint32_t reg;
   15613 	uint16_t id1, id2;
   15614 	int i, rv;
   15615 
   15616 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15617 		device_xname(sc->sc_dev), __func__));
   15618 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15619 
   15620 	id1 = id2 = 0xffff;
   15621 	for (i = 0; i < 2; i++) {
   15622 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15623 		    &id1);
   15624 		if ((rv != 0) || MII_INVALIDID(id1))
   15625 			continue;
   15626 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15627 		    &id2);
   15628 		if ((rv != 0) || MII_INVALIDID(id2))
   15629 			continue;
   15630 		break;
   15631 	}
   15632 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15633 		goto out;
   15634 
   15635 	/*
   15636 	 * In case the PHY needs to be in mdio slow mode,
   15637 	 * set slow mode and try to get the PHY id again.
   15638 	 */
   15639 	if (sc->sc_type < WM_T_PCH_LPT) {
   15640 		sc->phy.release(sc);
   15641 		wm_set_mdio_slow_mode_hv(sc);
   15642 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   15643 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   15644 		sc->phy.acquire(sc);
   15645 	}
	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		device_printf(sc->sc_dev,
		    "%s: could not read a valid PHY ID\n", __func__);
		return false;
	}
   15650 out:
   15651 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15652 		/* Only unforce SMBus if ME is not active */
   15653 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15654 			uint16_t phyreg;
   15655 
   15656 			/* Unforce SMBus mode in PHY */
   15657 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15658 			    CV_SMB_CTRL, &phyreg);
   15659 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15660 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15661 			    CV_SMB_CTRL, phyreg);
   15662 
   15663 			/* Unforce SMBus mode in MAC */
   15664 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15665 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15666 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15667 		}
   15668 	}
   15669 	return true;
   15670 }
   15671 
   15672 static void
   15673 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15674 {
   15675 	uint32_t reg;
   15676 	int i;
   15677 
   15678 	/* Set PHY Config Counter to 50msec */
   15679 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15680 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15681 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15682 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15683 
   15684 	/* Toggle LANPHYPC */
   15685 	reg = CSR_READ(sc, WMREG_CTRL);
   15686 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15687 	reg &= ~CTRL_LANPHYPC_VALUE;
   15688 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15689 	CSR_WRITE_FLUSH(sc);
   15690 	delay(1000);
   15691 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15692 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15693 	CSR_WRITE_FLUSH(sc);
   15694 
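	/*
	 * Parts older than PCH_LPT need a fixed 50ms delay; PCH_LPT and
	 * newer can poll the LANPHYPC done indication (CTRL_EXT_LPCD)
	 * instead, with a final settling delay.
	 */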
   15695 	if (sc->sc_type < WM_T_PCH_LPT)
   15696 		delay(50 * 1000);
   15697 	else {
   15698 		i = 20;
   15699 
   15700 		do {
   15701 			delay(5 * 1000);
   15702 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   15703 		    && i--);
   15704 
   15705 		delay(30 * 1000);
   15706 	}
   15707 }
   15708 
   15709 static int
   15710 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   15711 {
   15712 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   15713 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   15714 	uint32_t rxa;
   15715 	uint16_t scale = 0, lat_enc = 0;
   15716 	int32_t obff_hwm = 0;
   15717 	int64_t lat_ns, value;
   15718 
   15719 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15720 		device_xname(sc->sc_dev), __func__));
   15721 
   15722 	if (link) {
   15723 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   15724 		uint32_t status;
   15725 		uint16_t speed;
   15726 		pcireg_t preg;
   15727 
   15728 		status = CSR_READ(sc, WMREG_STATUS);
   15729 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   15730 		case STATUS_SPEED_10:
   15731 			speed = 10;
   15732 			break;
   15733 		case STATUS_SPEED_100:
   15734 			speed = 100;
   15735 			break;
   15736 		case STATUS_SPEED_1000:
   15737 			speed = 1000;
   15738 			break;
   15739 		default:
   15740 			device_printf(sc->sc_dev, "Unknown speed "
   15741 			    "(status = %08x)\n", status);
   15742 			return -1;
   15743 		}
   15744 
   15745 		/* Rx Packet Buffer Allocation size (KB) */
   15746 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   15747 
   15748 		/*
   15749 		 * Determine the maximum latency tolerated by the device.
   15750 		 *
   15751 		 * Per the PCIe spec, the tolerated latencies are encoded as
   15752 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   15753 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   15754 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   15755 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   15756 		 */
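		/*
		 * Worked example: if lat_ns comes to 1000000 (1ms), the
		 * loop below divides by 2^5 twice (scale = 2, value = 977),
		 * and the encoded latency represents 977 * 2^10 ns, i.e.
		 * about 1.0004ms.
		 */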
   15757 		lat_ns = ((int64_t)rxa * 1024 -
   15758 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   15759 			+ ETHER_HDR_LEN))) * 8 * 1000;
   15760 		if (lat_ns < 0)
   15761 			lat_ns = 0;
   15762 		else
   15763 			lat_ns /= speed;
   15764 		value = lat_ns;
   15765 
   15766 		while (value > LTRV_VALUE) {
			scale++;
   15768 			value = howmany(value, __BIT(5));
   15769 		}
   15770 		if (scale > LTRV_SCALE_MAX) {
			device_printf(sc->sc_dev,
			    "Invalid LTR latency scale %d\n", scale);
   15773 			return -1;
   15774 		}
   15775 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   15776 
   15777 		/* Determine the maximum latency tolerated by the platform */
   15778 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15779 		    WM_PCI_LTR_CAP_LPT);
   15780 		max_snoop = preg & 0xffff;
   15781 		max_nosnoop = preg >> 16;
   15782 
   15783 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   15784 
   15785 		if (lat_enc > max_ltr_enc) {
   15786 			lat_enc = max_ltr_enc;
   15787 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   15788 			    * PCI_LTR_SCALETONS(
   15789 				    __SHIFTOUT(lat_enc,
   15790 					PCI_LTR_MAXSNOOPLAT_SCALE));
   15791 		}
   15792 
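		/*
		 * Convert the tolerated latency back into the amount of Rx
		 * buffer (in KB, like rxa) consumed at line rate during
		 * that time; the OBFF high water mark is whatever remains
		 * of the Rx allocation.
		 */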
   15793 		if (lat_ns) {
   15794 			lat_ns *= speed * 1000;
   15795 			lat_ns /= 8;
   15796 			lat_ns /= 1000000000;
   15797 			obff_hwm = (int32_t)(rxa - lat_ns);
   15798 		}
   15799 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   15800 			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
   15802 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   15803 			return -1;
   15804 		}
   15805 	}
	/* Snoop and No-Snoop latencies are set to the same value */
   15807 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   15808 	CSR_WRITE(sc, WMREG_LTRV, reg);
   15809 
   15810 	/* Set OBFF high water mark */
   15811 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   15812 	reg |= obff_hwm;
   15813 	CSR_WRITE(sc, WMREG_SVT, reg);
   15814 
   15815 	/* Enable OBFF */
   15816 	reg = CSR_READ(sc, WMREG_SVCR);
   15817 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   15818 	CSR_WRITE(sc, WMREG_SVCR, reg);
   15819 
   15820 	return 0;
   15821 }
   15822 
   15823 /*
   15824  * I210 Errata 25 and I211 Errata 10
   15825  * Slow System Clock.
   15826  */
   15827 static void
   15828 wm_pll_workaround_i210(struct wm_softc *sc)
   15829 {
   15830 	uint32_t mdicnfg, wuc;
   15831 	uint32_t reg;
   15832 	pcireg_t pcireg;
   15833 	uint32_t pmreg;
   15834 	uint16_t nvmword, tmp_nvmword;
   15835 	int phyval;
   15836 	bool wa_done = false;
   15837 	int i;
   15838 
   15839 	/* Get Power Management cap offset */
   15840 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15841 	    &pmreg, NULL) == 0)
   15842 		return;
   15843 
   15844 	/* Save WUC and MDICNFG registers */
   15845 	wuc = CSR_READ(sc, WMREG_WUC);
   15846 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   15847 
   15848 	reg = mdicnfg & ~MDICNFG_DEST;
   15849 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15850 
   15851 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   15852 		nvmword = INVM_DEFAULT_AL;
   15853 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   15854 
   15855 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   15856 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   15857 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   15858 
   15859 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   15860 			break; /* OK */
   15861 		}
   15862 
   15863 		wa_done = true;
   15864 		/* Directly reset the internal PHY */
   15865 		reg = CSR_READ(sc, WMREG_CTRL);
   15866 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   15867 
   15868 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15869 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   15870 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15871 
   15872 		CSR_WRITE(sc, WMREG_WUC, 0);
   15873 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   15874 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15875 
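		/*
		 * Cycle the device through D3hot and back to D0 while the
		 * modified autoload word is in EEARBC; the original word
		 * is restored below.
		 */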
   15876 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15877 		    pmreg + PCI_PMCSR);
   15878 		pcireg |= PCI_PMCSR_STATE_D3;
   15879 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15880 		    pmreg + PCI_PMCSR, pcireg);
   15881 		delay(1000);
   15882 		pcireg &= ~PCI_PMCSR_STATE_D3;
   15883 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15884 		    pmreg + PCI_PMCSR, pcireg);
   15885 
   15886 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   15887 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15888 
   15889 		/* Restore WUC register */
   15890 		CSR_WRITE(sc, WMREG_WUC, wuc);
   15891 	}
   15892 
   15893 	/* Restore MDICNFG setting */
   15894 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   15895 	if (wa_done)
   15896 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   15897 }
   15898 
   15899 static void
   15900 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   15901 {
   15902 	uint32_t reg;
   15903 
   15904 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15905 		device_xname(sc->sc_dev), __func__));
   15906 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   15907 	    || (sc->sc_type == WM_T_PCH_CNP));
   15908 
   15909 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15910 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   15911 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15912 
   15913 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   15914 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   15915 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   15916 }
   15917