/*	$NetBSD: if_wm.c,v 1.555 2018/01/16 07:23:13 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.555 2018/01/16 07:23:13 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
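
/*
 * Illustrative DPRINTF() usage (a sketch only; the second argument is a
 * fully parenthesized printf() argument list, so the macro needs no
 * varargs support):
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: status changed\n",
 *	    device_xname(sc->sc_dev)));
 *
 * When WM_DEBUG is not defined this compiles away to nothing.
 */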

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
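
/*
 * Note: the index-advance macros above rely on the ring and job-queue
 * sizes being powers of two, so that "(x + 1) & (n - 1)" is equivalent
 * to "(x + 1) % n" without a division.  For example, with a 256-entry
 * ring, WM_NEXTTX() maps index 255 to (256 & 255) == 0, wrapping back
 * to the start of the ring.
 */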

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal-
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet (a ~9k frame split across 2k clusters).  We allocate 256
 * receive descriptors, each with a 2k buffer (MCLBYTES), which gives
 * us room for 50 jumbo packets (256 / 5 = 51, rounded down).
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
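
/*
 * Illustrative expansion of the helpers above (a sketch, not compiled
 * code): WM_Q_EVCNT_DEFINE(txq, txdw) declares, inside a queue
 * structure,
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (note "##" is not expanded inside the string literal, so the buffer
 * is a fixed 18 bytes), and
 * WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, EVCNT_TYPE_INTR) formats
 * the name "txq00txdw" into that buffer and registers the counter
 * with evcnt_attach_dynamic(9).
 */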

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This pcq intermediates them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags, to
	 * manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
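
/*
 * The op-vectors above let chip-specific attach code select the right
 * semaphore and access routines once, so the common code stays generic.
 * A minimal sketch of the intended calling pattern (assuming the same
 * acquire/read/release convention wm_nvm_read() follows later in this
 * file; not compiled):
 */
#if 0
static int
wm_nvm_read_example(struct wm_softc *sc, int word, uint16_t *datap)
{
	int rv;

	if (sc->nvm.acquire(sc) != 0)
		return 1;
	rv = sc->nvm.read(sc, word, 1, datap);
	sc->nvm.release(sc);
	return rv;
}
#endif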

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
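
/*
 * Typical use of the core-lock helpers above (an illustrative sketch,
 * not compiled): paths that touch shared softc state take the lock,
 * and *_locked() helpers assert ownership.
 */
#if 0
	WM_CORE_LOCK(sc);
	/* ... update sc_if_flags, multicast filter, etc. ... */
	WM_CORE_UNLOCK(sc);

	/* Inside a *_locked() helper: */
	KASSERT(WM_CORE_LOCKED(sc));
#endif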

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
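
/*
 * The two macros above maintain rxq_head/rxq_tailp as a conventional
 * tail-pointer mbuf chain.  Illustrative use when reassembling a
 * multi-buffer packet (a sketch of what wm_rxeof() does):
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* chain empty, rxq_len = 0 */
	WM_RXCHAIN_LINK(rxq, m0);	/* head == tail == m0 */
	WM_RXCHAIN_LINK(rxq, m1);	/* m0->m_next == m1, tail == m1 */
#endif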

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
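
/*
 * CSR_WRITE_FLUSH() forces posted writes out by reading back a
 * harmless register (STATUS).  A typical read-modify-write-flush
 * sequence looks like this (illustrative only; register and bit names
 * are examples from if_wmreg.h):
 */
#if 0
	uint32_t reg;

	reg = CSR_READ(sc, WMREG_CTRL);
	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_SLU);
	CSR_WRITE_FLUSH(sc);
#endif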

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
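
/*
 * Worked example for the HI/LO split above: with a 64-bit bus_addr_t,
 * a descriptor base address of 0x123456780 splits into HI 0x00000001
 * and LO 0x23456780, which the init code writes to the paired
 * base-address registers (TDBAH/TDBAL, RDBAH/RDBAL).  With a 32-bit
 * bus_addr_t the HI half is always 0.
 */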

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1330 	  "82576 gigabit Ethernet",
   1331 	  WM_T_82576,		WMP_F_COPPER },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1334 	  "82576 gigabit Ethernet (SERDES)",
   1335 	  WM_T_82576,		WMP_F_SERDES },
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1337 	  "82576 quad-gigabit Ethernet (SERDES)",
   1338 	  WM_T_82576,		WMP_F_SERDES },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1341 	  "82580 1000BaseT Ethernet",
   1342 	  WM_T_82580,		WMP_F_COPPER },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1344 	  "82580 1000BaseX Ethernet",
   1345 	  WM_T_82580,		WMP_F_FIBER },
   1346 
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1348 	  "82580 1000BaseT Ethernet (SERDES)",
   1349 	  WM_T_82580,		WMP_F_SERDES },
   1350 
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1352 	  "82580 gigabit Ethernet (SGMII)",
   1353 	  WM_T_82580,		WMP_F_COPPER },
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1355 	  "82580 dual-1000BaseT Ethernet",
   1356 	  WM_T_82580,		WMP_F_COPPER },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1359 	  "82580 quad-1000BaseX Ethernet",
   1360 	  WM_T_82580,		WMP_F_FIBER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1363 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1364 	  WM_T_82580,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1367 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1368 	  WM_T_82580,		WMP_F_SERDES },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1371 	  "DH89XXCC 1000BASE-KX Ethernet",
   1372 	  WM_T_82580,		WMP_F_SERDES },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1375 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1376 	  WM_T_82580,		WMP_F_SERDES },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1379 	  "I350 Gigabit Network Connection",
   1380 	  WM_T_I350,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1383 	  "I350 Gigabit Fiber Network Connection",
   1384 	  WM_T_I350,		WMP_F_FIBER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1387 	  "I350 Gigabit Backplane Connection",
   1388 	  WM_T_I350,		WMP_F_SERDES },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1391 	  "I350 Quad Port Gigabit Ethernet",
   1392 	  WM_T_I350,		WMP_F_SERDES },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1395 	  "I350 Gigabit Connection",
   1396 	  WM_T_I350,		WMP_F_COPPER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1399 	  "I354 Gigabit Ethernet (KX)",
   1400 	  WM_T_I354,		WMP_F_SERDES },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1403 	  "I354 Gigabit Ethernet (SGMII)",
   1404 	  WM_T_I354,		WMP_F_COPPER },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1407 	  "I354 Gigabit Ethernet (2.5G)",
   1408 	  WM_T_I354,		WMP_F_COPPER },
   1409 
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1411 	  "I210-T1 Ethernet Server Adapter",
   1412 	  WM_T_I210,		WMP_F_COPPER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1415 	  "I210 Ethernet (Copper OEM)",
   1416 	  WM_T_I210,		WMP_F_COPPER },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1419 	  "I210 Ethernet (Copper IT)",
   1420 	  WM_T_I210,		WMP_F_COPPER },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1423 	  "I210 Ethernet (FLASH less)",
   1424 	  WM_T_I210,		WMP_F_COPPER },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1427 	  "I210 Gigabit Ethernet (Fiber)",
   1428 	  WM_T_I210,		WMP_F_FIBER },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1431 	  "I210 Gigabit Ethernet (SERDES)",
   1432 	  WM_T_I210,		WMP_F_SERDES },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1435 	  "I210 Gigabit Ethernet (FLASH less)",
   1436 	  WM_T_I210,		WMP_F_SERDES },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1439 	  "I210 Gigabit Ethernet (SGMII)",
   1440 	  WM_T_I210,		WMP_F_COPPER },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1443 	  "I211 Ethernet (COPPER)",
   1444 	  WM_T_I211,		WMP_F_COPPER },
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1446 	  "I217 V Ethernet Connection",
   1447 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1449 	  "I217 LM Ethernet Connection",
   1450 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1452 	  "I218 V Ethernet Connection",
   1453 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1455 	  "I218 V Ethernet Connection",
   1456 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1458 	  "I218 V Ethernet Connection",
   1459 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1461 	  "I218 LM Ethernet Connection",
   1462 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1464 	  "I218 LM Ethernet Connection",
   1465 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1467 	  "I218 LM Ethernet Connection",
   1468 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1469 #if 0
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1471 	  "I219 V Ethernet Connection",
   1472 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1474 	  "I219 V Ethernet Connection",
   1475 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1477 	  "I219 V Ethernet Connection",
   1478 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1480 	  "I219 V Ethernet Connection",
   1481 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1483 	  "I219 LM Ethernet Connection",
   1484 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1486 	  "I219 LM Ethernet Connection",
   1487 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1489 	  "I219 LM Ethernet Connection",
   1490 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1492 	  "I219 LM Ethernet Connection",
   1493 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1495 	  "I219 LM Ethernet Connection",
   1496 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1497 #endif
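         	/* Table terminator: wm_lookup() stops at the first NULL name */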
   1498 	{ 0,			0,
   1499 	  NULL,
   1500 	  0,			0 },
   1501 };
   1502 
   1503 /*
   1504  * Register read/write functions.
   1505  * Other than CSR_{READ|WRITE}().
   1506  */
   1507 
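         /*
          * Both helpers below use the indirect access window in the I/O
          * BAR: the target register offset is written at I/O offset 0,
          * and the data is then read or written at I/O offset 4.
          */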
   1508 #if 0 /* Not currently used */
   1509 static inline uint32_t
   1510 wm_io_read(struct wm_softc *sc, int reg)
   1511 {
   1512 
   1513 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1514 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1515 }
   1516 #endif
   1517 
   1518 static inline void
   1519 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1520 {
   1521 
   1522 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1523 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1524 }
   1525 
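         /*
          * Write an 8-bit value to a controller register reached through
          * an index/data style CSR (e.g. SCTL): the data and register
          * offset are packed into one write, and the READY bit is then
          * polled for up to SCTL_CTL_POLL_TIMEOUT iterations of 5us each.
          */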
   1526 static inline void
   1527 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1528     uint32_t data)
   1529 {
   1530 	uint32_t regval;
   1531 	int i;
   1532 
   1533 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1534 
   1535 	CSR_WRITE(sc, reg, regval);
   1536 
   1537 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1538 		delay(5);
   1539 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1540 			break;
   1541 	}
   1542 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1543 		aprint_error("%s: WARNING:"
   1544 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1545 		    device_xname(sc->sc_dev), reg);
   1546 	}
   1547 }
   1548 
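         /*
          * Store a bus address into the two little-endian 32-bit words of
          * a descriptor address field; the high word is zero on platforms
          * with a 32-bit bus_addr_t.
          */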
   1549 static inline void
   1550 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1551 {
   1552 	wa->wa_low = htole32(v & 0xffffffffU);
   1553 	if (sizeof(bus_addr_t) == 8)
   1554 		wa->wa_high = htole32((uint64_t) v >> 32);
   1555 	else
   1556 		wa->wa_high = 0;
   1557 }
   1558 
   1559 /*
   1560  * Descriptor sync/init functions.
   1561  */
   1562 static inline void
   1563 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1564 {
   1565 	struct wm_softc *sc = txq->txq_sc;
   1566 
   1567 	/* If it will wrap around, sync to the end of the ring. */
   1568 	if ((start + num) > WM_NTXDESC(txq)) {
   1569 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1570 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1571 		    (WM_NTXDESC(txq) - start), ops);
   1572 		num -= (WM_NTXDESC(txq) - start);
   1573 		start = 0;
   1574 	}
   1575 
   1576 	/* Now sync whatever is left. */
   1577 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1578 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1579 }
   1580 
   1581 static inline void
   1582 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1583 {
   1584 	struct wm_softc *sc = rxq->rxq_sc;
   1585 
   1586 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1587 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1588 }
   1589 
   1590 static inline void
   1591 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1592 {
   1593 	struct wm_softc *sc = rxq->rxq_sc;
   1594 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1595 	struct mbuf *m = rxs->rxs_mbuf;
   1596 
   1597 	/*
   1598 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1599 	 * so that the payload after the Ethernet header is aligned
   1600 	 * to a 4-byte boundary.
    1601 	 *
   1602 	 * XXX BRAINDAMAGE ALERT!
   1603 	 * The stupid chip uses the same size for every buffer, which
   1604 	 * is set in the Receive Control register.  We are using the 2K
   1605 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1606 	 * reason, we can't "scoot" packets longer than the standard
   1607 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1608 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1609 	 * the upper layer copy the headers.
   1610 	 */
   1611 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1612 
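         	/*
          	 * Rewrite the descriptor in whichever of the three formats
          	 * this chip uses: extended (82574), advanced ("new queue",
          	 * 82575 and newer) or the legacy wiseman layout.
          	 */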
   1613 	if (sc->sc_type == WM_T_82574) {
   1614 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1615 		rxd->erx_data.erxd_addr =
   1616 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1617 		rxd->erx_data.erxd_dd = 0;
   1618 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1619 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1620 
   1621 		rxd->nqrx_data.nrxd_paddr =
   1622 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1623 		/* Currently, split header is not supported. */
   1624 		rxd->nqrx_data.nrxd_haddr = 0;
   1625 	} else {
   1626 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1627 
   1628 		wm_set_dma_addr(&rxd->wrx_addr,
   1629 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1630 		rxd->wrx_len = 0;
   1631 		rxd->wrx_cksum = 0;
   1632 		rxd->wrx_status = 0;
   1633 		rxd->wrx_errors = 0;
   1634 		rxd->wrx_special = 0;
   1635 	}
   1636 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1637 
   1638 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1639 }
   1640 
   1641 /*
   1642  * Device driver interface functions and commonly used functions.
   1643  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1644  */
   1645 
   1646 /* Lookup supported device table */
   1647 static const struct wm_product *
   1648 wm_lookup(const struct pci_attach_args *pa)
   1649 {
   1650 	const struct wm_product *wmp;
   1651 
   1652 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1653 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1654 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1655 			return wmp;
   1656 	}
   1657 	return NULL;
   1658 }
   1659 
   1660 /* The match function (ca_match) */
   1661 static int
   1662 wm_match(device_t parent, cfdata_t cf, void *aux)
   1663 {
   1664 	struct pci_attach_args *pa = aux;
   1665 
   1666 	if (wm_lookup(pa) != NULL)
   1667 		return 1;
   1668 
   1669 	return 0;
   1670 }
   1671 
   1672 /* The attach function (ca_attach) */
   1673 static void
   1674 wm_attach(device_t parent, device_t self, void *aux)
   1675 {
   1676 	struct wm_softc *sc = device_private(self);
   1677 	struct pci_attach_args *pa = aux;
   1678 	prop_dictionary_t dict;
   1679 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1680 	pci_chipset_tag_t pc = pa->pa_pc;
   1681 	int counts[PCI_INTR_TYPE_SIZE];
   1682 	pci_intr_type_t max_type;
   1683 	const char *eetype, *xname;
   1684 	bus_space_tag_t memt;
   1685 	bus_space_handle_t memh;
   1686 	bus_size_t memsize;
   1687 	int memh_valid;
   1688 	int i, error;
   1689 	const struct wm_product *wmp;
   1690 	prop_data_t ea;
   1691 	prop_number_t pn;
   1692 	uint8_t enaddr[ETHER_ADDR_LEN];
   1693 	char buf[256];
   1694 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1695 	pcireg_t preg, memtype;
   1696 	uint16_t eeprom_data, apme_mask;
   1697 	bool force_clear_smbi;
   1698 	uint32_t link_mode;
   1699 	uint32_t reg;
   1700 
   1701 	sc->sc_dev = self;
   1702 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1703 	sc->sc_core_stopping = false;
   1704 
   1705 	wmp = wm_lookup(pa);
   1706 #ifdef DIAGNOSTIC
   1707 	if (wmp == NULL) {
   1708 		printf("\n");
   1709 		panic("wm_attach: impossible");
   1710 	}
   1711 #endif
   1712 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1713 
   1714 	sc->sc_pc = pa->pa_pc;
   1715 	sc->sc_pcitag = pa->pa_tag;
   1716 
   1717 	if (pci_dma64_available(pa))
   1718 		sc->sc_dmat = pa->pa_dmat64;
   1719 	else
   1720 		sc->sc_dmat = pa->pa_dmat;
   1721 
   1722 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1723 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1724 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1725 
   1726 	sc->sc_type = wmp->wmp_type;
   1727 
   1728 	/* Set default function pointers */
   1729 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1730 	sc->phy.release = sc->nvm.release = wm_put_null;
   1731 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1732 
   1733 	if (sc->sc_type < WM_T_82543) {
   1734 		if (sc->sc_rev < 2) {
   1735 			aprint_error_dev(sc->sc_dev,
   1736 			    "i82542 must be at least rev. 2\n");
   1737 			return;
   1738 		}
   1739 		if (sc->sc_rev < 3)
   1740 			sc->sc_type = WM_T_82542_2_0;
   1741 	}
   1742 
   1743 	/*
   1744 	 * Disable MSI for Errata:
   1745 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1746 	 *
   1747 	 *  82544: Errata 25
   1748 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1749 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1750 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1751 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1752 	 *
   1753 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1754 	 *
   1755 	 *  82571 & 82572: Errata 63
   1756 	 */
   1757 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1758 	    || (sc->sc_type == WM_T_82572))
   1759 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1760 
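         	/* 82575 and newer use the advanced ("new queue") descriptors */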
   1761 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1762 	    || (sc->sc_type == WM_T_82580)
   1763 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1764 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1765 		sc->sc_flags |= WM_F_NEWQUEUE;
   1766 
   1767 	/* Set device properties (mactype) */
   1768 	dict = device_properties(sc->sc_dev);
   1769 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1770 
   1771 	/*
    1772 	 * Map the device.  All devices support memory-mapped access,
   1773 	 * and it is really required for normal operation.
   1774 	 */
   1775 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1776 	switch (memtype) {
   1777 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1778 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1779 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1780 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1781 		break;
   1782 	default:
   1783 		memh_valid = 0;
   1784 		break;
   1785 	}
   1786 
   1787 	if (memh_valid) {
   1788 		sc->sc_st = memt;
   1789 		sc->sc_sh = memh;
   1790 		sc->sc_ss = memsize;
   1791 	} else {
   1792 		aprint_error_dev(sc->sc_dev,
   1793 		    "unable to map device registers\n");
   1794 		return;
   1795 	}
   1796 
   1797 	/*
   1798 	 * In addition, i82544 and later support I/O mapped indirect
   1799 	 * register access.  It is not desirable (nor supported in
   1800 	 * this driver) to use it for normal operation, though it is
   1801 	 * required to work around bugs in some chip versions.
   1802 	 */
   1803 	if (sc->sc_type >= WM_T_82544) {
   1804 		/* First we have to find the I/O BAR. */
   1805 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1806 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1807 			if (memtype == PCI_MAPREG_TYPE_IO)
   1808 				break;
   1809 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1810 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1811 				i += 4;	/* skip high bits, too */
   1812 		}
   1813 		if (i < PCI_MAPREG_END) {
    1814 			/*
    1815 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1816 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1817 			 * That's not a problem, because the newer chips
    1818 			 * don't have this bug.
    1819 			 *
    1820 			 * The i8254x apparently doesn't respond when the
    1821 			 * I/O BAR is 0, which looks as if it hasn't been
    1822 			 * configured.
    1823 			 */
   1824 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1825 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1826 				aprint_error_dev(sc->sc_dev,
   1827 				    "WARNING: I/O BAR at zero.\n");
   1828 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1829 					0, &sc->sc_iot, &sc->sc_ioh,
   1830 					NULL, &sc->sc_ios) == 0) {
   1831 				sc->sc_flags |= WM_F_IOH_VALID;
   1832 			} else {
   1833 				aprint_error_dev(sc->sc_dev,
   1834 				    "WARNING: unable to map I/O space\n");
   1835 			}
   1836 		}
   1837 
   1838 	}
   1839 
   1840 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1841 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1842 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1843 	if (sc->sc_type < WM_T_82542_2_1)
   1844 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1845 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1846 
   1847 	/* power up chip */
   1848 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1849 	    NULL)) && error != EOPNOTSUPP) {
   1850 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1851 		return;
   1852 	}
   1853 
   1854 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
    1855 	/*
    1856 	 * Don't use MSI-X if we can use only one queue, to save
    1857 	 * interrupt resources.
    1858 	 */
   1859 	if (sc->sc_nqueues > 1) {
   1860 		max_type = PCI_INTR_TYPE_MSIX;
    1861 		/*
    1862 		 * The 82583 has an MSI-X capability in its PCI configuration
    1863 		 * space, but the chip doesn't actually support it; at least
    1864 		 * the documentation doesn't say anything about MSI-X.
    1865 		 */
   1866 		counts[PCI_INTR_TYPE_MSIX]
   1867 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1868 	} else {
   1869 		max_type = PCI_INTR_TYPE_MSI;
   1870 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1871 	}
   1872 
   1873 	/* Allocation settings */
   1874 	counts[PCI_INTR_TYPE_MSI] = 1;
   1875 	counts[PCI_INTR_TYPE_INTX] = 1;
   1876 	/* overridden by disable flags */
   1877 	if (wm_disable_msi != 0) {
   1878 		counts[PCI_INTR_TYPE_MSI] = 0;
   1879 		if (wm_disable_msix != 0) {
   1880 			max_type = PCI_INTR_TYPE_INTX;
   1881 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1882 		}
   1883 	} else if (wm_disable_msix != 0) {
   1884 		max_type = PCI_INTR_TYPE_MSI;
   1885 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1886 	}
   1887 
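         	/*
          	 * Allocate and establish the interrupt, falling back in
          	 * steps: if MSI-X setup fails, retry with MSI; if MSI setup
          	 * fails, retry with INTx.
          	 */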
   1888 alloc_retry:
   1889 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1890 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1891 		return;
   1892 	}
   1893 
   1894 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1895 		error = wm_setup_msix(sc);
   1896 		if (error) {
   1897 			pci_intr_release(pc, sc->sc_intrs,
   1898 			    counts[PCI_INTR_TYPE_MSIX]);
   1899 
   1900 			/* Setup for MSI: Disable MSI-X */
   1901 			max_type = PCI_INTR_TYPE_MSI;
   1902 			counts[PCI_INTR_TYPE_MSI] = 1;
   1903 			counts[PCI_INTR_TYPE_INTX] = 1;
   1904 			goto alloc_retry;
   1905 		}
    1906 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1907 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1908 		error = wm_setup_legacy(sc);
   1909 		if (error) {
   1910 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1911 			    counts[PCI_INTR_TYPE_MSI]);
   1912 
   1913 			/* The next try is for INTx: Disable MSI */
   1914 			max_type = PCI_INTR_TYPE_INTX;
   1915 			counts[PCI_INTR_TYPE_INTX] = 1;
   1916 			goto alloc_retry;
   1917 		}
   1918 	} else {
   1919 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1920 		error = wm_setup_legacy(sc);
   1921 		if (error) {
   1922 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1923 			    counts[PCI_INTR_TYPE_INTX]);
   1924 			return;
   1925 		}
   1926 	}
   1927 
   1928 	/*
   1929 	 * Check the function ID (unit number of the chip).
   1930 	 */
   1931 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1932 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1933 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1934 	    || (sc->sc_type == WM_T_82580)
   1935 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1936 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1937 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1938 	else
   1939 		sc->sc_funcid = 0;
   1940 
   1941 	/*
   1942 	 * Determine a few things about the bus we're connected to.
   1943 	 */
   1944 	if (sc->sc_type < WM_T_82543) {
   1945 		/* We don't really know the bus characteristics here. */
   1946 		sc->sc_bus_speed = 33;
   1947 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
    1948 		/*
    1949 		 * CSA (Communication Streaming Architecture) is about as fast
    1950 		 * as a 32-bit 66MHz PCI bus.
    1951 		 */
   1952 		sc->sc_flags |= WM_F_CSA;
   1953 		sc->sc_bus_speed = 66;
   1954 		aprint_verbose_dev(sc->sc_dev,
   1955 		    "Communication Streaming Architecture\n");
   1956 		if (sc->sc_type == WM_T_82547) {
   1957 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1958 			callout_setfunc(&sc->sc_txfifo_ch,
   1959 					wm_82547_txfifo_stall, sc);
   1960 			aprint_verbose_dev(sc->sc_dev,
   1961 			    "using 82547 Tx FIFO stall work-around\n");
   1962 		}
   1963 	} else if (sc->sc_type >= WM_T_82571) {
   1964 		sc->sc_flags |= WM_F_PCIE;
   1965 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1966 		    && (sc->sc_type != WM_T_ICH10)
   1967 		    && (sc->sc_type != WM_T_PCH)
   1968 		    && (sc->sc_type != WM_T_PCH2)
   1969 		    && (sc->sc_type != WM_T_PCH_LPT)
   1970 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1971 			/* ICH* and PCH* have no PCIe capability registers */
   1972 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1973 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1974 				NULL) == 0)
   1975 				aprint_error_dev(sc->sc_dev,
   1976 				    "unable to find PCIe capability\n");
   1977 		}
   1978 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1979 	} else {
   1980 		reg = CSR_READ(sc, WMREG_STATUS);
   1981 		if (reg & STATUS_BUS64)
   1982 			sc->sc_flags |= WM_F_BUS64;
   1983 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1984 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1985 
   1986 			sc->sc_flags |= WM_F_PCIX;
   1987 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1988 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1989 				aprint_error_dev(sc->sc_dev,
   1990 				    "unable to find PCIX capability\n");
   1991 			else if (sc->sc_type != WM_T_82545_3 &&
   1992 				 sc->sc_type != WM_T_82546_3) {
   1993 				/*
   1994 				 * Work around a problem caused by the BIOS
   1995 				 * setting the max memory read byte count
   1996 				 * incorrectly.
   1997 				 */
   1998 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1999 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2000 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2001 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2002 
   2003 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2004 				    PCIX_CMD_BYTECNT_SHIFT;
   2005 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2006 				    PCIX_STATUS_MAXB_SHIFT;
   2007 				if (bytecnt > maxb) {
   2008 					aprint_verbose_dev(sc->sc_dev,
   2009 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2010 					    512 << bytecnt, 512 << maxb);
   2011 					pcix_cmd = (pcix_cmd &
   2012 					    ~PCIX_CMD_BYTECNT_MASK) |
   2013 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2014 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2015 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2016 					    pcix_cmd);
   2017 				}
   2018 			}
   2019 		}
   2020 		/*
   2021 		 * The quad port adapter is special; it has a PCIX-PCIX
   2022 		 * bridge on the board, and can run the secondary bus at
   2023 		 * a higher speed.
   2024 		 */
   2025 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2026 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2027 								      : 66;
   2028 		} else if (sc->sc_flags & WM_F_PCIX) {
   2029 			switch (reg & STATUS_PCIXSPD_MASK) {
   2030 			case STATUS_PCIXSPD_50_66:
   2031 				sc->sc_bus_speed = 66;
   2032 				break;
   2033 			case STATUS_PCIXSPD_66_100:
   2034 				sc->sc_bus_speed = 100;
   2035 				break;
   2036 			case STATUS_PCIXSPD_100_133:
   2037 				sc->sc_bus_speed = 133;
   2038 				break;
   2039 			default:
   2040 				aprint_error_dev(sc->sc_dev,
   2041 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2042 				    reg & STATUS_PCIXSPD_MASK);
   2043 				sc->sc_bus_speed = 66;
   2044 				break;
   2045 			}
   2046 		} else
   2047 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2048 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2049 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2050 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2051 	}
   2052 
    2053 	/* Disable ASPM L0s and/or L1 as a workaround */
   2054 	wm_disable_aspm(sc);
   2055 
   2056 	/* clear interesting stat counters */
   2057 	CSR_READ(sc, WMREG_COLC);
   2058 	CSR_READ(sc, WMREG_RXERRC);
   2059 
   2060 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2061 	    || (sc->sc_type >= WM_T_ICH8))
   2062 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2063 	if (sc->sc_type >= WM_T_ICH8)
   2064 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2065 
   2066 	/* Set PHY, NVM mutex related stuff */
   2067 	switch (sc->sc_type) {
   2068 	case WM_T_82542_2_0:
   2069 	case WM_T_82542_2_1:
   2070 	case WM_T_82543:
   2071 	case WM_T_82544:
   2072 		/* Microwire */
   2073 		sc->nvm.read = wm_nvm_read_uwire;
   2074 		sc->sc_nvm_wordsize = 64;
   2075 		sc->sc_nvm_addrbits = 6;
   2076 		break;
   2077 	case WM_T_82540:
   2078 	case WM_T_82545:
   2079 	case WM_T_82545_3:
   2080 	case WM_T_82546:
   2081 	case WM_T_82546_3:
   2082 		/* Microwire */
   2083 		sc->nvm.read = wm_nvm_read_uwire;
   2084 		reg = CSR_READ(sc, WMREG_EECD);
   2085 		if (reg & EECD_EE_SIZE) {
   2086 			sc->sc_nvm_wordsize = 256;
   2087 			sc->sc_nvm_addrbits = 8;
   2088 		} else {
   2089 			sc->sc_nvm_wordsize = 64;
   2090 			sc->sc_nvm_addrbits = 6;
   2091 		}
   2092 		sc->sc_flags |= WM_F_LOCK_EECD;
   2093 		sc->nvm.acquire = wm_get_eecd;
   2094 		sc->nvm.release = wm_put_eecd;
   2095 		break;
   2096 	case WM_T_82541:
   2097 	case WM_T_82541_2:
   2098 	case WM_T_82547:
   2099 	case WM_T_82547_2:
   2100 		reg = CSR_READ(sc, WMREG_EECD);
    2101 		/*
    2102 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2103 		 * 8254[17], so set the flags and functions before calling it.
    2104 		 */
   2105 		sc->sc_flags |= WM_F_LOCK_EECD;
   2106 		sc->nvm.acquire = wm_get_eecd;
   2107 		sc->nvm.release = wm_put_eecd;
   2108 		if (reg & EECD_EE_TYPE) {
   2109 			/* SPI */
   2110 			sc->nvm.read = wm_nvm_read_spi;
   2111 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2112 			wm_nvm_set_addrbits_size_eecd(sc);
   2113 		} else {
   2114 			/* Microwire */
   2115 			sc->nvm.read = wm_nvm_read_uwire;
   2116 			if ((reg & EECD_EE_ABITS) != 0) {
   2117 				sc->sc_nvm_wordsize = 256;
   2118 				sc->sc_nvm_addrbits = 8;
   2119 			} else {
   2120 				sc->sc_nvm_wordsize = 64;
   2121 				sc->sc_nvm_addrbits = 6;
   2122 			}
   2123 		}
   2124 		break;
   2125 	case WM_T_82571:
   2126 	case WM_T_82572:
   2127 		/* SPI */
   2128 		sc->nvm.read = wm_nvm_read_eerd;
    2129 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2130 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2131 		wm_nvm_set_addrbits_size_eecd(sc);
   2132 		sc->phy.acquire = wm_get_swsm_semaphore;
   2133 		sc->phy.release = wm_put_swsm_semaphore;
   2134 		sc->nvm.acquire = wm_get_nvm_82571;
   2135 		sc->nvm.release = wm_put_nvm_82571;
   2136 		break;
   2137 	case WM_T_82573:
   2138 	case WM_T_82574:
   2139 	case WM_T_82583:
   2140 		sc->nvm.read = wm_nvm_read_eerd;
    2141 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2142 		if (sc->sc_type == WM_T_82573) {
   2143 			sc->phy.acquire = wm_get_swsm_semaphore;
   2144 			sc->phy.release = wm_put_swsm_semaphore;
   2145 			sc->nvm.acquire = wm_get_nvm_82571;
   2146 			sc->nvm.release = wm_put_nvm_82571;
   2147 		} else {
   2148 			/* Both PHY and NVM use the same semaphore. */
   2149 			sc->phy.acquire = sc->nvm.acquire
   2150 			    = wm_get_swfwhw_semaphore;
   2151 			sc->phy.release = sc->nvm.release
   2152 			    = wm_put_swfwhw_semaphore;
   2153 		}
   2154 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2155 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2156 			sc->sc_nvm_wordsize = 2048;
   2157 		} else {
   2158 			/* SPI */
   2159 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2160 			wm_nvm_set_addrbits_size_eecd(sc);
   2161 		}
   2162 		break;
   2163 	case WM_T_82575:
   2164 	case WM_T_82576:
   2165 	case WM_T_82580:
   2166 	case WM_T_I350:
   2167 	case WM_T_I354:
   2168 	case WM_T_80003:
   2169 		/* SPI */
   2170 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2171 		wm_nvm_set_addrbits_size_eecd(sc);
    2172 		if ((sc->sc_type == WM_T_80003)
   2173 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2174 			sc->nvm.read = wm_nvm_read_eerd;
   2175 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2176 		} else {
   2177 			sc->nvm.read = wm_nvm_read_spi;
   2178 			sc->sc_flags |= WM_F_LOCK_EECD;
   2179 		}
   2180 		sc->phy.acquire = wm_get_phy_82575;
   2181 		sc->phy.release = wm_put_phy_82575;
   2182 		sc->nvm.acquire = wm_get_nvm_80003;
   2183 		sc->nvm.release = wm_put_nvm_80003;
   2184 		break;
   2185 	case WM_T_ICH8:
   2186 	case WM_T_ICH9:
   2187 	case WM_T_ICH10:
   2188 	case WM_T_PCH:
   2189 	case WM_T_PCH2:
   2190 	case WM_T_PCH_LPT:
   2191 		sc->nvm.read = wm_nvm_read_ich8;
   2192 		/* FLASH */
   2193 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2194 		sc->sc_nvm_wordsize = 2048;
   2195 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2196 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2197 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2198 			aprint_error_dev(sc->sc_dev,
   2199 			    "can't map FLASH registers\n");
   2200 			goto out;
   2201 		}
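         		/*
          		 * GFPREG describes the flash region in sector units: the
          		 * low field is the first sector and the field at bit 16
          		 * is the last one.  Convert the base to a byte offset,
          		 * and halve the region (two banks) to get the bank size
          		 * in 16-bit words.
          		 */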
   2202 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2203 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2204 		    ICH_FLASH_SECTOR_SIZE;
   2205 		sc->sc_ich8_flash_bank_size =
   2206 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2207 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2208 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2209 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2210 		sc->sc_flashreg_offset = 0;
   2211 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2212 		sc->phy.release = wm_put_swflag_ich8lan;
   2213 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2214 		sc->nvm.release = wm_put_nvm_ich8lan;
   2215 		break;
   2216 	case WM_T_PCH_SPT:
   2217 		sc->nvm.read = wm_nvm_read_spt;
   2218 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2219 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2220 		sc->sc_flasht = sc->sc_st;
   2221 		sc->sc_flashh = sc->sc_sh;
   2222 		sc->sc_ich8_flash_base = 0;
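         		/*
          		 * Bits 5:1 of the STRAP register, plus one, give the
          		 * flash size in units of NVM_SIZE_MULTIPLIER bytes.
          		 */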
   2223 		sc->sc_nvm_wordsize =
   2224 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2225 			* NVM_SIZE_MULTIPLIER;
    2226 		/* That is the size in bytes; we want words */
   2227 		sc->sc_nvm_wordsize /= 2;
   2228 		/* assume 2 banks */
   2229 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2230 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2231 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2232 		sc->phy.release = wm_put_swflag_ich8lan;
   2233 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2234 		sc->nvm.release = wm_put_nvm_ich8lan;
   2235 		break;
   2236 	case WM_T_I210:
   2237 	case WM_T_I211:
    2238 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2239 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2240 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2241 			sc->nvm.read = wm_nvm_read_eerd;
   2242 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2243 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2244 			wm_nvm_set_addrbits_size_eecd(sc);
   2245 		} else {
   2246 			sc->nvm.read = wm_nvm_read_invm;
   2247 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2248 			sc->sc_nvm_wordsize = INVM_SIZE;
   2249 		}
   2250 		sc->phy.acquire = wm_get_phy_82575;
   2251 		sc->phy.release = wm_put_phy_82575;
   2252 		sc->nvm.acquire = wm_get_nvm_80003;
   2253 		sc->nvm.release = wm_put_nvm_80003;
   2254 		break;
   2255 	default:
   2256 		break;
   2257 	}
   2258 
   2259 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2260 	switch (sc->sc_type) {
   2261 	case WM_T_82571:
   2262 	case WM_T_82572:
   2263 		reg = CSR_READ(sc, WMREG_SWSM2);
   2264 		if ((reg & SWSM2_LOCK) == 0) {
   2265 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2266 			force_clear_smbi = true;
   2267 		} else
   2268 			force_clear_smbi = false;
   2269 		break;
   2270 	case WM_T_82573:
   2271 	case WM_T_82574:
   2272 	case WM_T_82583:
   2273 		force_clear_smbi = true;
   2274 		break;
   2275 	default:
   2276 		force_clear_smbi = false;
   2277 		break;
   2278 	}
   2279 	if (force_clear_smbi) {
   2280 		reg = CSR_READ(sc, WMREG_SWSM);
   2281 		if ((reg & SWSM_SMBI) != 0)
   2282 			aprint_error_dev(sc->sc_dev,
   2283 			    "Please update the Bootagent\n");
   2284 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2285 	}
   2286 
    2287 	/*
    2288 	 * Defer printing the EEPROM type until after verifying the checksum.
    2289 	 * This allows the EEPROM type to be printed correctly in the case
    2290 	 * that no EEPROM is attached.
    2291 	 */
   2292 	/*
   2293 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2294 	 * this for later, so we can fail future reads from the EEPROM.
   2295 	 */
   2296 	if (wm_nvm_validate_checksum(sc)) {
    2297 		/*
    2298 		 * Retry the read, because some PCI-e parts fail the
    2299 		 * first check due to the link being in a sleep state.
    2300 		 */
   2301 		if (wm_nvm_validate_checksum(sc))
   2302 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2303 	}
   2304 
   2305 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2306 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2307 	else {
   2308 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2309 		    sc->sc_nvm_wordsize);
   2310 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2311 			aprint_verbose("iNVM");
   2312 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2313 			aprint_verbose("FLASH(HW)");
   2314 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2315 			aprint_verbose("FLASH");
   2316 		else {
   2317 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2318 				eetype = "SPI";
   2319 			else
   2320 				eetype = "MicroWire";
   2321 			aprint_verbose("(%d address bits) %s EEPROM",
   2322 			    sc->sc_nvm_addrbits, eetype);
   2323 		}
   2324 	}
   2325 	wm_nvm_version(sc);
   2326 	aprint_verbose("\n");
   2327 
   2328 	/*
   2329 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2330 	 * incorrect.
   2331 	 */
   2332 	wm_gmii_setup_phytype(sc, 0, 0);
   2333 
   2334 	/* Reset the chip to a known state. */
   2335 	wm_reset(sc);
   2336 
   2337 	/* Check for I21[01] PLL workaround */
   2338 	if (sc->sc_type == WM_T_I210)
   2339 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2340 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2341 		/* NVM image release 3.25 has a workaround */
   2342 		if ((sc->sc_nvm_ver_major < 3)
   2343 		    || ((sc->sc_nvm_ver_major == 3)
   2344 			&& (sc->sc_nvm_ver_minor < 25))) {
   2345 			aprint_verbose_dev(sc->sc_dev,
   2346 			    "ROM image version %d.%d is older than 3.25\n",
   2347 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2348 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2349 		}
   2350 	}
   2351 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2352 		wm_pll_workaround_i210(sc);
   2353 
   2354 	wm_get_wakeup(sc);
   2355 
   2356 	/* Non-AMT based hardware can now take control from firmware */
   2357 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2358 		wm_get_hw_control(sc);
   2359 
    2360 	/*
    2361 	 * Read the Ethernet address from the EEPROM, unless it was
    2362 	 * first found in the device properties.
    2363 	 */
   2364 	ea = prop_dictionary_get(dict, "mac-address");
   2365 	if (ea != NULL) {
   2366 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2367 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2368 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2369 	} else {
   2370 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2371 			aprint_error_dev(sc->sc_dev,
   2372 			    "unable to read Ethernet address\n");
   2373 			goto out;
   2374 		}
   2375 	}
   2376 
   2377 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2378 	    ether_sprintf(enaddr));
   2379 
   2380 	/*
   2381 	 * Read the config info from the EEPROM, and set up various
   2382 	 * bits in the control registers based on their contents.
   2383 	 */
   2384 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2385 	if (pn != NULL) {
   2386 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2387 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2388 	} else {
   2389 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2390 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2391 			goto out;
   2392 		}
   2393 	}
   2394 
   2395 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2396 	if (pn != NULL) {
   2397 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2398 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2399 	} else {
   2400 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2401 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2402 			goto out;
   2403 		}
   2404 	}
   2405 
   2406 	/* check for WM_F_WOL */
   2407 	switch (sc->sc_type) {
   2408 	case WM_T_82542_2_0:
   2409 	case WM_T_82542_2_1:
   2410 	case WM_T_82543:
   2411 		/* dummy? */
   2412 		eeprom_data = 0;
   2413 		apme_mask = NVM_CFG3_APME;
   2414 		break;
   2415 	case WM_T_82544:
   2416 		apme_mask = NVM_CFG2_82544_APM_EN;
   2417 		eeprom_data = cfg2;
   2418 		break;
   2419 	case WM_T_82546:
   2420 	case WM_T_82546_3:
   2421 	case WM_T_82571:
   2422 	case WM_T_82572:
   2423 	case WM_T_82573:
   2424 	case WM_T_82574:
   2425 	case WM_T_82583:
   2426 	case WM_T_80003:
   2427 	default:
   2428 		apme_mask = NVM_CFG3_APME;
   2429 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2430 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2431 		break;
   2432 	case WM_T_82575:
   2433 	case WM_T_82576:
   2434 	case WM_T_82580:
   2435 	case WM_T_I350:
   2436 	case WM_T_I354: /* XXX ok? */
   2437 	case WM_T_ICH8:
   2438 	case WM_T_ICH9:
   2439 	case WM_T_ICH10:
   2440 	case WM_T_PCH:
   2441 	case WM_T_PCH2:
   2442 	case WM_T_PCH_LPT:
   2443 	case WM_T_PCH_SPT:
   2444 		/* XXX The funcid should be checked on some devices */
   2445 		apme_mask = WUC_APME;
   2446 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2447 		break;
   2448 	}
   2449 
   2450 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2451 	if ((eeprom_data & apme_mask) != 0)
   2452 		sc->sc_flags |= WM_F_WOL;
   2453 
   2454 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2455 		/* Check NVM for autonegotiation */
   2456 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2457 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2458 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2459 		}
   2460 	}
   2461 
    2462 	/*
    2463 	 * XXX need special handling for some multiple-port cards
    2464 	 * to disable a particular port.
    2465 	 */
   2466 
   2467 	if (sc->sc_type >= WM_T_82544) {
   2468 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2469 		if (pn != NULL) {
   2470 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2471 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2472 		} else {
   2473 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2474 				aprint_error_dev(sc->sc_dev,
   2475 				    "unable to read SWDPIN\n");
   2476 				goto out;
   2477 			}
   2478 		}
   2479 	}
   2480 
   2481 	if (cfg1 & NVM_CFG1_ILOS)
   2482 		sc->sc_ctrl |= CTRL_ILOS;
   2483 
    2484 	/*
    2485 	 * XXX
    2486 	 * This code isn't correct because pins 2 and 3 are located
    2487 	 * at different positions on newer chips. Check all the datasheets.
    2488 	 *
    2489 	 * Until this problem is resolved, only do this for chips <= 82580.
    2490 	 */
   2491 	if (sc->sc_type <= WM_T_82580) {
   2492 		if (sc->sc_type >= WM_T_82544) {
   2493 			sc->sc_ctrl |=
   2494 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2495 			    CTRL_SWDPIO_SHIFT;
   2496 			sc->sc_ctrl |=
   2497 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2498 			    CTRL_SWDPINS_SHIFT;
   2499 		} else {
   2500 			sc->sc_ctrl |=
   2501 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2502 			    CTRL_SWDPIO_SHIFT;
   2503 		}
   2504 	}
   2505 
   2506 	/* XXX For other than 82580? */
   2507 	if (sc->sc_type == WM_T_82580) {
   2508 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2509 		if (nvmword & __BIT(13))
   2510 			sc->sc_ctrl |= CTRL_ILOS;
   2511 	}
   2512 
   2513 #if 0
   2514 	if (sc->sc_type >= WM_T_82544) {
   2515 		if (cfg1 & NVM_CFG1_IPS0)
   2516 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2517 		if (cfg1 & NVM_CFG1_IPS1)
   2518 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2519 		sc->sc_ctrl_ext |=
   2520 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2521 		    CTRL_EXT_SWDPIO_SHIFT;
   2522 		sc->sc_ctrl_ext |=
   2523 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2524 		    CTRL_EXT_SWDPINS_SHIFT;
   2525 	} else {
   2526 		sc->sc_ctrl_ext |=
   2527 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2528 		    CTRL_EXT_SWDPIO_SHIFT;
   2529 	}
   2530 #endif
   2531 
   2532 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2533 #if 0
   2534 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2535 #endif
   2536 
   2537 	if (sc->sc_type == WM_T_PCH) {
   2538 		uint16_t val;
   2539 
   2540 		/* Save the NVM K1 bit setting */
   2541 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2542 
   2543 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2544 			sc->sc_nvm_k1_enabled = 1;
   2545 		else
   2546 			sc->sc_nvm_k1_enabled = 0;
   2547 	}
   2548 
   2549 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2550 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2551 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2552 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2553 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2554 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2555 		/* Copper only */
   2556 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2557 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2558 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2559 	    || (sc->sc_type == WM_T_I211)) {
   2560 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2561 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2562 		switch (link_mode) {
   2563 		case CTRL_EXT_LINK_MODE_1000KX:
   2564 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2565 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2566 			break;
   2567 		case CTRL_EXT_LINK_MODE_SGMII:
   2568 			if (wm_sgmii_uses_mdio(sc)) {
   2569 				aprint_verbose_dev(sc->sc_dev,
   2570 				    "SGMII(MDIO)\n");
   2571 				sc->sc_flags |= WM_F_SGMII;
   2572 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2573 				break;
   2574 			}
   2575 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2576 			/*FALLTHROUGH*/
   2577 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2578 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2579 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2580 				if (link_mode
   2581 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2582 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2583 					sc->sc_flags |= WM_F_SGMII;
   2584 				} else {
   2585 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2586 					aprint_verbose_dev(sc->sc_dev,
   2587 					    "SERDES\n");
   2588 				}
   2589 				break;
   2590 			}
   2591 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2592 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2593 
   2594 			/* Change current link mode setting */
   2595 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2596 			switch (sc->sc_mediatype) {
   2597 			case WM_MEDIATYPE_COPPER:
   2598 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2599 				break;
   2600 			case WM_MEDIATYPE_SERDES:
   2601 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2602 				break;
   2603 			default:
   2604 				break;
   2605 			}
   2606 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2607 			break;
   2608 		case CTRL_EXT_LINK_MODE_GMII:
   2609 		default:
   2610 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2611 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2612 			break;
   2613 		}
   2614 
    2616 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2617 			reg |= CTRL_EXT_I2C_ENA;
    2618 		else
    2619 			reg &= ~CTRL_EXT_I2C_ENA;
   2620 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2621 	} else if (sc->sc_type < WM_T_82543 ||
   2622 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2623 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2624 			aprint_error_dev(sc->sc_dev,
   2625 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2626 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2627 		}
   2628 	} else {
   2629 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2630 			aprint_error_dev(sc->sc_dev,
   2631 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2632 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2633 		}
   2634 	}
   2635 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2636 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2637 
   2638 	/* Set device properties (macflags) */
   2639 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2640 
   2641 	/* Initialize the media structures accordingly. */
   2642 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2643 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2644 	else
   2645 		wm_tbi_mediainit(sc); /* All others */
   2646 
   2647 	ifp = &sc->sc_ethercom.ec_if;
   2648 	xname = device_xname(sc->sc_dev);
   2649 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2650 	ifp->if_softc = sc;
   2651 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2652 #ifdef WM_MPSAFE
   2653 	ifp->if_extflags = IFEF_MPSAFE;
   2654 #endif
   2655 	ifp->if_ioctl = wm_ioctl;
   2656 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2657 		ifp->if_start = wm_nq_start;
    2658 		/*
    2659 		 * When the number of CPUs is one and the controller can use
    2660 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2661 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2662 		 * other for link status changes.
    2663 		 * In this situation, wm_nq_transmit() is disadvantageous
    2664 		 * because of the wm_select_txqueue() and pcq(9) overhead.
    2665 		 */
   2666 		if (wm_is_using_multiqueue(sc))
   2667 			ifp->if_transmit = wm_nq_transmit;
   2668 	} else {
   2669 		ifp->if_start = wm_start;
    2670 		/*
    2671 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
    2672 		 */
   2673 		if (wm_is_using_multiqueue(sc))
   2674 			ifp->if_transmit = wm_transmit;
   2675 	}
   2676 	ifp->if_watchdog = wm_watchdog;
   2677 	ifp->if_init = wm_init;
   2678 	ifp->if_stop = wm_stop;
   2679 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2680 	IFQ_SET_READY(&ifp->if_snd);
   2681 
   2682 	/* Check for jumbo frame */
   2683 	switch (sc->sc_type) {
   2684 	case WM_T_82573:
   2685 		/* XXX limited to 9234 if ASPM is disabled */
   2686 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2687 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2688 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2689 		break;
   2690 	case WM_T_82571:
   2691 	case WM_T_82572:
   2692 	case WM_T_82574:
   2693 	case WM_T_82583:
   2694 	case WM_T_82575:
   2695 	case WM_T_82576:
   2696 	case WM_T_82580:
   2697 	case WM_T_I350:
   2698 	case WM_T_I354:
   2699 	case WM_T_I210:
   2700 	case WM_T_I211:
   2701 	case WM_T_80003:
   2702 	case WM_T_ICH9:
   2703 	case WM_T_ICH10:
   2704 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2705 	case WM_T_PCH_LPT:
   2706 	case WM_T_PCH_SPT:
   2707 		/* XXX limited to 9234 */
   2708 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2709 		break;
   2710 	case WM_T_PCH:
   2711 		/* XXX limited to 4096 */
   2712 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2713 		break;
   2714 	case WM_T_82542_2_0:
   2715 	case WM_T_82542_2_1:
   2716 	case WM_T_ICH8:
   2717 		/* No support for jumbo frame */
   2718 		break;
   2719 	default:
   2720 		/* ETHER_MAX_LEN_JUMBO */
   2721 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2722 		break;
   2723 	}
   2724 
    2725 	/* If we're an i82543 or greater, we can support VLANs. */
   2726 	if (sc->sc_type >= WM_T_82543)
   2727 		sc->sc_ethercom.ec_capabilities |=
   2728 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2729 
    2730 	/*
    2731 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
    2732 	 * on i82543 and later.
    2733 	 */
   2734 	if (sc->sc_type >= WM_T_82543) {
   2735 		ifp->if_capabilities |=
   2736 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2737 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2738 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2739 		    IFCAP_CSUM_TCPv6_Tx |
   2740 		    IFCAP_CSUM_UDPv6_Tx;
   2741 	}
   2742 
   2743 	/*
   2744 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2745 	 *
   2746 	 *	82541GI (8086:1076) ... no
   2747 	 *	82572EI (8086:10b9) ... yes
   2748 	 */
   2749 	if (sc->sc_type >= WM_T_82571) {
   2750 		ifp->if_capabilities |=
   2751 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2752 	}
   2753 
    2754 	/*
    2755 	 * If we're an i82544 or greater (except i82547), we can do
    2756 	 * TCP segmentation offload.
    2757 	 */
   2758 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2759 		ifp->if_capabilities |= IFCAP_TSOv4;
   2760 	}
   2761 
   2762 	if (sc->sc_type >= WM_T_82571) {
   2763 		ifp->if_capabilities |= IFCAP_TSOv6;
   2764 	}
   2765 
   2766 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2767 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2768 
   2769 #ifdef WM_MPSAFE
   2770 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2771 #else
   2772 	sc->sc_core_lock = NULL;
   2773 #endif
   2774 
   2775 	/* Attach the interface. */
   2776 	error = if_initialize(ifp);
   2777 	if (error != 0) {
   2778 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2779 		    error);
   2780 		return; /* Error */
   2781 	}
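         	/* Create the per-CPU input queue used for received packets. */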
   2782 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2783 	ether_ifattach(ifp, enaddr);
   2784 	if_register(ifp);
   2785 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2786 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2787 			  RND_FLAG_DEFAULT);
   2788 
   2789 #ifdef WM_EVENT_COUNTERS
   2790 	/* Attach event counters. */
   2791 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2792 	    NULL, xname, "linkintr");
   2793 
   2794 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2795 	    NULL, xname, "tx_xoff");
   2796 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2797 	    NULL, xname, "tx_xon");
   2798 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2799 	    NULL, xname, "rx_xoff");
   2800 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2801 	    NULL, xname, "rx_xon");
   2802 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2803 	    NULL, xname, "rx_macctl");
   2804 #endif /* WM_EVENT_COUNTERS */
   2805 
   2806 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2807 		pmf_class_network_register(self, ifp);
   2808 	else
   2809 		aprint_error_dev(self, "couldn't establish power handler\n");
   2810 
   2811 	sc->sc_flags |= WM_F_ATTACHED;
   2812  out:
   2813 	return;
   2814 }
   2815 
   2816 /* The detach function (ca_detach) */
   2817 static int
   2818 wm_detach(device_t self, int flags __unused)
   2819 {
   2820 	struct wm_softc *sc = device_private(self);
   2821 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2822 	int i;
   2823 
   2824 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2825 		return 0;
   2826 
    2827 	/* Stop the interface; callouts are stopped inside wm_stop(). */
   2828 	wm_stop(ifp, 1);
   2829 
   2830 	pmf_device_deregister(self);
   2831 
   2832 #ifdef WM_EVENT_COUNTERS
   2833 	evcnt_detach(&sc->sc_ev_linkintr);
   2834 
   2835 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2836 	evcnt_detach(&sc->sc_ev_tx_xon);
   2837 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2838 	evcnt_detach(&sc->sc_ev_rx_xon);
   2839 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2840 #endif /* WM_EVENT_COUNTERS */
   2841 
   2842 	/* Tell the firmware about the release */
   2843 	WM_CORE_LOCK(sc);
   2844 	wm_release_manageability(sc);
   2845 	wm_release_hw_control(sc);
   2846 	wm_enable_wakeup(sc);
   2847 	WM_CORE_UNLOCK(sc);
   2848 
   2849 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2850 
   2851 	/* Delete all remaining media. */
   2852 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2853 
   2854 	ether_ifdetach(ifp);
   2855 	if_detach(ifp);
   2856 	if_percpuq_destroy(sc->sc_ipq);
   2857 
   2858 	/* Unload RX dmamaps and free mbufs */
   2859 	for (i = 0; i < sc->sc_nqueues; i++) {
   2860 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2861 		mutex_enter(rxq->rxq_lock);
   2862 		wm_rxdrain(rxq);
   2863 		mutex_exit(rxq->rxq_lock);
   2864 	}
   2865 	/* Must unlock here */
   2866 
   2867 	/* Disestablish the interrupt handler */
   2868 	for (i = 0; i < sc->sc_nintrs; i++) {
   2869 		if (sc->sc_ihs[i] != NULL) {
   2870 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2871 			sc->sc_ihs[i] = NULL;
   2872 		}
   2873 	}
   2874 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2875 
   2876 	wm_free_txrx_queues(sc);
   2877 
   2878 	/* Unmap the registers */
   2879 	if (sc->sc_ss) {
   2880 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2881 		sc->sc_ss = 0;
   2882 	}
   2883 	if (sc->sc_ios) {
   2884 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2885 		sc->sc_ios = 0;
   2886 	}
   2887 	if (sc->sc_flashs) {
   2888 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2889 		sc->sc_flashs = 0;
   2890 	}
   2891 
   2892 	if (sc->sc_core_lock)
   2893 		mutex_obj_free(sc->sc_core_lock);
   2894 	if (sc->sc_ich_phymtx)
   2895 		mutex_obj_free(sc->sc_ich_phymtx);
   2896 	if (sc->sc_ich_nvmmtx)
   2897 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2898 
   2899 	return 0;
   2900 }
   2901 
   2902 static bool
   2903 wm_suspend(device_t self, const pmf_qual_t *qual)
   2904 {
   2905 	struct wm_softc *sc = device_private(self);
   2906 
   2907 	wm_release_manageability(sc);
   2908 	wm_release_hw_control(sc);
   2909 	wm_enable_wakeup(sc);
   2910 
   2911 	return true;
   2912 }
   2913 
   2914 static bool
   2915 wm_resume(device_t self, const pmf_qual_t *qual)
   2916 {
   2917 	struct wm_softc *sc = device_private(self);
   2918 
   2919 	/* Disable ASPM L0s and/or L1 for workaround */
   2920 	wm_disable_aspm(sc);
   2921 	wm_init_manageability(sc);
   2922 
   2923 	return true;
   2924 }
   2925 
   2926 /*
   2927  * wm_watchdog:		[ifnet interface function]
   2928  *
   2929  *	Watchdog timer handler.
   2930  */
   2931 static void
   2932 wm_watchdog(struct ifnet *ifp)
   2933 {
   2934 	int qid;
   2935 	struct wm_softc *sc = ifp->if_softc;
   2936 
   2937 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2938 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2939 
   2940 		wm_watchdog_txq(ifp, txq);
   2941 	}
   2942 
   2943 	/* Reset the interface. */
   2944 	(void) wm_init(ifp);
   2945 
    2946 	/*
    2947 	 * Some upper layer processing may still call ifp->if_start()
    2948 	 * directly, e.g. ALTQ or a single-CPU system.
    2949 	 */
   2950 	/* Try to get more packets going. */
   2951 	ifp->if_start(ifp);
   2952 }
   2953 
   2954 static void
   2955 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2956 {
   2957 
   2958 	mutex_enter(txq->txq_lock);
   2959 	wm_watchdog_txq_locked(ifp, txq);
   2960 	mutex_exit(txq->txq_lock);
   2961 }
   2962 
   2963 static void
   2964 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   2965 {
   2966 	struct wm_softc *sc = ifp->if_softc;
   2967 
   2968 	KASSERT(mutex_owned(txq->txq_lock));
   2969 
   2970 	/*
   2971 	 * Since we're using delayed interrupts, sweep up
   2972 	 * before we report an error.
   2973 	 */
   2974 	wm_txeof(sc, txq);
   2975 
   2976 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2977 #ifdef WM_DEBUG
   2978 		int i, j;
   2979 		struct wm_txsoft *txs;
   2980 #endif
   2981 		log(LOG_ERR,
   2982 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2983 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2984 		    txq->txq_next);
   2985 		ifp->if_oerrors++;
   2986 #ifdef WM_DEBUG
    2987 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2988 		    i = WM_NEXTTXS(txq, i)) {
    2989 			txs = &txq->txq_soft[i];
    2990 			printf("txs %d tx %d -> %d\n",
    2991 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2992 			for (j = txs->txs_firstdesc; ;
    2993 			    j = WM_NEXTTX(txq, j)) {
    2994 				if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    2995 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2996 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2997 					printf("\t %#08x%08x\n",
    2998 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2999 					    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    3000 				} else {
    3001 					printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    3002 					    (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
    3003 					    txq->txq_descs[j].wtx_addr.wa_low);
    3004 					printf("\t %#04x%02x%02x%08x\n",
    3005 					    txq->txq_descs[j].wtx_fields.wtxu_vlan,
    3006 					    txq->txq_descs[j].wtx_fields.wtxu_options,
    3007 					    txq->txq_descs[j].wtx_fields.wtxu_status,
    3008 					    txq->txq_descs[j].wtx_cmdlen);
    3009 				}
    3010 				if (j == txs->txs_lastdesc)
    3011 					break;
    3012 			}
    3013 		}
   3014 #endif
   3015 	}
   3016 }
   3017 
   3018 /*
   3019  * wm_tick:
   3020  *
   3021  *	One second timer, used to check link status, sweep up
   3022  *	completed transmit jobs, etc.
   3023  */
   3024 static void
   3025 wm_tick(void *arg)
   3026 {
   3027 	struct wm_softc *sc = arg;
   3028 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3029 #ifndef WM_MPSAFE
   3030 	int s = splnet();
   3031 #endif
   3032 
   3033 	WM_CORE_LOCK(sc);
   3034 
   3035 	if (sc->sc_core_stopping)
   3036 		goto out;
   3037 
   3038 	if (sc->sc_type >= WM_T_82542_2_1) {
   3039 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3040 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3041 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3042 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3043 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3044 	}
   3045 
   3046 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3047 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3048 	    + CSR_READ(sc, WMREG_CRCERRS)
   3049 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3050 	    + CSR_READ(sc, WMREG_SYMERRC)
   3051 	    + CSR_READ(sc, WMREG_RXERRC)
   3052 	    + CSR_READ(sc, WMREG_SEC)
   3053 	    + CSR_READ(sc, WMREG_CEXTERR)
   3054 	    + CSR_READ(sc, WMREG_RLEC);
    3055 	/*
    3056 	 * WMREG_RNBC is incremented when there are no available buffers
    3057 	 * in host memory. It does not count dropped packets, because the
    3058 	 * ethernet controller can still receive packets in that case as
    3059 	 * long as there is space in the PHY's FIFO.
    3060 	 *
    3061 	 * If you want to count WMREG_RNBC, use a dedicated EVCNT instead
    3062 	 * of if_iqdrops.
    3063 	 */
   3064 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
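         	/*
         	 * A minimal sketch of such a private counter (sc_ev_rnbc is a
         	 * hypothetical softc member, not part of this driver): attach
         	 * it in wm_attach() with
         	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
         	 *	    NULL, xname, "rx_no_buffers");
         	 * and accumulate it here with
         	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
         	 */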
   3065 
   3066 	if (sc->sc_flags & WM_F_HAS_MII)
   3067 		mii_tick(&sc->sc_mii);
   3068 	else if ((sc->sc_type >= WM_T_82575)
   3069 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3070 		wm_serdes_tick(sc);
   3071 	else
   3072 		wm_tbi_tick(sc);
   3073 
   3074 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3075 out:
   3076 	WM_CORE_UNLOCK(sc);
   3077 #ifndef WM_MPSAFE
   3078 	splx(s);
   3079 #endif
   3080 }
   3081 
   3082 static int
   3083 wm_ifflags_cb(struct ethercom *ec)
   3084 {
   3085 	struct ifnet *ifp = &ec->ec_if;
   3086 	struct wm_softc *sc = ifp->if_softc;
   3087 	int rc = 0;
   3088 
   3089 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3090 		device_xname(sc->sc_dev), __func__));
   3091 
   3092 	WM_CORE_LOCK(sc);
   3093 
   3094 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3095 	sc->sc_if_flags = ifp->if_flags;
   3096 
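         	/*
         	 * Flag changes we cannot handle here are pushed back to the
         	 * caller as ENETRESET so the interface is reinitialized.
         	 */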
   3097 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3098 		rc = ENETRESET;
   3099 		goto out;
   3100 	}
   3101 
   3102 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3103 		wm_set_filter(sc);
   3104 
   3105 	wm_set_vlan(sc);
   3106 
   3107 out:
   3108 	WM_CORE_UNLOCK(sc);
   3109 
   3110 	return rc;
   3111 }
   3112 
   3113 /*
   3114  * wm_ioctl:		[ifnet interface function]
   3115  *
   3116  *	Handle control requests from the operator.
   3117  */
   3118 static int
   3119 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3120 {
   3121 	struct wm_softc *sc = ifp->if_softc;
   3122 	struct ifreq *ifr = (struct ifreq *) data;
   3123 	struct ifaddr *ifa = (struct ifaddr *)data;
   3124 	struct sockaddr_dl *sdl;
   3125 	int s, error;
   3126 
   3127 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3128 		device_xname(sc->sc_dev), __func__));
   3129 
   3130 #ifndef WM_MPSAFE
   3131 	s = splnet();
   3132 #endif
   3133 	switch (cmd) {
   3134 	case SIOCSIFMEDIA:
   3135 	case SIOCGIFMEDIA:
   3136 		WM_CORE_LOCK(sc);
   3137 		/* Flow control requires full-duplex mode. */
   3138 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3139 		    (ifr->ifr_media & IFM_FDX) == 0)
   3140 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3141 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3142 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3143 				/* We can do both TXPAUSE and RXPAUSE. */
   3144 				ifr->ifr_media |=
   3145 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3146 			}
   3147 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3148 		}
   3149 		WM_CORE_UNLOCK(sc);
   3150 #ifdef WM_MPSAFE
   3151 		s = splnet();
   3152 #endif
   3153 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3154 #ifdef WM_MPSAFE
   3155 		splx(s);
   3156 #endif
   3157 		break;
   3158 	case SIOCINITIFADDR:
   3159 		WM_CORE_LOCK(sc);
   3160 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3161 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3162 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3163 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3164 			/* unicast address is first multicast entry */
   3165 			wm_set_filter(sc);
   3166 			error = 0;
   3167 			WM_CORE_UNLOCK(sc);
   3168 			break;
   3169 		}
   3170 		WM_CORE_UNLOCK(sc);
   3171 		/*FALLTHROUGH*/
   3172 	default:
   3173 #ifdef WM_MPSAFE
   3174 		s = splnet();
   3175 #endif
   3176 		/* It may call wm_start, so unlock here */
   3177 		error = ether_ioctl(ifp, cmd, data);
   3178 #ifdef WM_MPSAFE
   3179 		splx(s);
   3180 #endif
   3181 		if (error != ENETRESET)
   3182 			break;
   3183 
   3184 		error = 0;
   3185 
   3186 		if (cmd == SIOCSIFCAP) {
   3187 			error = (*ifp->if_init)(ifp);
   3188 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3189 			;
   3190 		else if (ifp->if_flags & IFF_RUNNING) {
   3191 			/*
   3192 			 * Multicast list has changed; set the hardware filter
   3193 			 * accordingly.
   3194 			 */
   3195 			WM_CORE_LOCK(sc);
   3196 			wm_set_filter(sc);
   3197 			WM_CORE_UNLOCK(sc);
   3198 		}
   3199 		break;
   3200 	}
   3201 
   3202 #ifndef WM_MPSAFE
   3203 	splx(s);
   3204 #endif
   3205 	return error;
   3206 }
   3207 
   3208 /* MAC address related */
   3209 
   3210 /*
    3211  * Get the offset of the MAC address and return it.
    3212  * If an error occurs, offset 0 is used.
   3213  */
   3214 static uint16_t
   3215 wm_check_alt_mac_addr(struct wm_softc *sc)
   3216 {
   3217 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3218 	uint16_t offset = NVM_OFF_MACADDR;
   3219 
   3220 	/* Try to read alternative MAC address pointer */
   3221 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3222 		return 0;
   3223 
    3224 	/* Check whether the pointer is valid. */
   3225 	if ((offset == 0x0000) || (offset == 0xffff))
   3226 		return 0;
   3227 
   3228 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3229 	/*
    3230 	 * Check whether the alternative MAC address is valid. Some cards
    3231 	 * have a non-0xffff pointer but don't actually use an alternative
    3232 	 * MAC address.
    3233 	 *
    3234 	 * A valid address must have the multicast (broadcast) bit clear.
    3235 	 */
   3236 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3237 		if (((myea[0] & 0xff) & 0x01) == 0)
   3238 			return offset; /* Found */
   3239 
   3240 	/* Not found */
   3241 	return 0;
   3242 }
   3243 
   3244 static int
   3245 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3246 {
   3247 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3248 	uint16_t offset = NVM_OFF_MACADDR;
   3249 	int do_invert = 0;
   3250 
   3251 	switch (sc->sc_type) {
   3252 	case WM_T_82580:
   3253 	case WM_T_I350:
   3254 	case WM_T_I354:
   3255 		/* EEPROM Top Level Partitioning */
   3256 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3257 		break;
   3258 	case WM_T_82571:
   3259 	case WM_T_82575:
   3260 	case WM_T_82576:
   3261 	case WM_T_80003:
   3262 	case WM_T_I210:
   3263 	case WM_T_I211:
   3264 		offset = wm_check_alt_mac_addr(sc);
   3265 		if (offset == 0)
   3266 			if ((sc->sc_funcid & 0x01) == 1)
   3267 				do_invert = 1;
   3268 		break;
   3269 	default:
   3270 		if ((sc->sc_funcid & 0x01) == 1)
   3271 			do_invert = 1;
   3272 		break;
   3273 	}
   3274 
   3275 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3276 		goto bad;
   3277 
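         	/* Each 16-bit NVM word holds two address octets, low byte first. */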
   3278 	enaddr[0] = myea[0] & 0xff;
   3279 	enaddr[1] = myea[0] >> 8;
   3280 	enaddr[2] = myea[1] & 0xff;
   3281 	enaddr[3] = myea[1] >> 8;
   3282 	enaddr[4] = myea[2] & 0xff;
   3283 	enaddr[5] = myea[2] >> 8;
   3284 
   3285 	/*
   3286 	 * Toggle the LSB of the MAC address on the second port
   3287 	 * of some dual port cards.
   3288 	 */
   3289 	if (do_invert != 0)
   3290 		enaddr[5] ^= 1;
   3291 
   3292 	return 0;
   3293 
   3294  bad:
   3295 	return -1;
   3296 }
   3297 
   3298 /*
   3299  * wm_set_ral:
   3300  *
    3301  *	Set an entry in the receive address list.
   3302  */
   3303 static void
   3304 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3305 {
   3306 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3307 	uint32_t wlock_mac;
   3308 	int rv;
   3309 
   3310 	if (enaddr != NULL) {
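         		/*
         		 * Pack the 48-bit address into the RAL/RAH pair;
         		 * RAL_AV marks the entry valid.
         		 */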
   3311 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3312 		    (enaddr[3] << 24);
   3313 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3314 		ral_hi |= RAL_AV;
   3315 	} else {
   3316 		ral_lo = 0;
   3317 		ral_hi = 0;
   3318 	}
   3319 
   3320 	switch (sc->sc_type) {
   3321 	case WM_T_82542_2_0:
   3322 	case WM_T_82542_2_1:
   3323 	case WM_T_82543:
   3324 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3325 		CSR_WRITE_FLUSH(sc);
   3326 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3327 		CSR_WRITE_FLUSH(sc);
   3328 		break;
   3329 	case WM_T_PCH2:
   3330 	case WM_T_PCH_LPT:
   3331 	case WM_T_PCH_SPT:
   3332 		if (idx == 0) {
   3333 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3334 			CSR_WRITE_FLUSH(sc);
   3335 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3336 			CSR_WRITE_FLUSH(sc);
   3337 			return;
   3338 		}
   3339 		if (sc->sc_type != WM_T_PCH2) {
   3340 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3341 			    FWSM_WLOCK_MAC);
   3342 			addrl = WMREG_SHRAL(idx - 1);
   3343 			addrh = WMREG_SHRAH(idx - 1);
   3344 		} else {
   3345 			wlock_mac = 0;
   3346 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3347 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3348 		}
   3349 
   3350 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3351 			rv = wm_get_swflag_ich8lan(sc);
   3352 			if (rv != 0)
   3353 				return;
   3354 			CSR_WRITE(sc, addrl, ral_lo);
   3355 			CSR_WRITE_FLUSH(sc);
   3356 			CSR_WRITE(sc, addrh, ral_hi);
   3357 			CSR_WRITE_FLUSH(sc);
   3358 			wm_put_swflag_ich8lan(sc);
   3359 		}
   3360 
   3361 		break;
   3362 	default:
   3363 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3364 		CSR_WRITE_FLUSH(sc);
   3365 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3366 		CSR_WRITE_FLUSH(sc);
   3367 		break;
   3368 	}
   3369 }
   3370 
   3371 /*
   3372  * wm_mchash:
   3373  *
   3374  *	Compute the hash of the multicast address for the 4096-bit
   3375  *	multicast filter.
   3376  */
   3377 static uint32_t
   3378 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3379 {
   3380 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3381 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3382 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3383 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3384 	uint32_t hash;
   3385 
   3386 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3387 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3388 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3389 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3390 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3391 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3392 		return (hash & 0x3ff);
   3393 	}
   3394 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3395 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3396 
   3397 	return (hash & 0xfff);
   3398 }
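         
         /*
          * Worked example (informal): with sc_mchash_type 0 on a non-ICH chip,
          * the address 01:00:5e:00:00:01 has enaddr[4] = 0x00 and
          * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010,
          * i.e. bit 16 of MTA word 0 in the 4096-bit filter.
          */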
   3399 
   3400 /*
   3401  * wm_set_filter:
   3402  *
   3403  *	Set up the receive filter.
   3404  */
   3405 static void
   3406 wm_set_filter(struct wm_softc *sc)
   3407 {
   3408 	struct ethercom *ec = &sc->sc_ethercom;
   3409 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3410 	struct ether_multi *enm;
   3411 	struct ether_multistep step;
   3412 	bus_addr_t mta_reg;
   3413 	uint32_t hash, reg, bit;
   3414 	int i, size, ralmax;
   3415 
   3416 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3417 		device_xname(sc->sc_dev), __func__));
   3418 
   3419 	if (sc->sc_type >= WM_T_82544)
   3420 		mta_reg = WMREG_CORDOVA_MTA;
   3421 	else
   3422 		mta_reg = WMREG_MTA;
   3423 
   3424 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3425 
   3426 	if (ifp->if_flags & IFF_BROADCAST)
   3427 		sc->sc_rctl |= RCTL_BAM;
   3428 	if (ifp->if_flags & IFF_PROMISC) {
   3429 		sc->sc_rctl |= RCTL_UPE;
   3430 		goto allmulti;
   3431 	}
   3432 
   3433 	/*
   3434 	 * Set the station address in the first RAL slot, and
   3435 	 * clear the remaining slots.
   3436 	 */
   3437 	if (sc->sc_type == WM_T_ICH8)
    3438 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3439 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3440 	    || (sc->sc_type == WM_T_PCH))
   3441 		size = WM_RAL_TABSIZE_ICH8;
   3442 	else if (sc->sc_type == WM_T_PCH2)
   3443 		size = WM_RAL_TABSIZE_PCH2;
   3444 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3445 		size = WM_RAL_TABSIZE_PCH_LPT;
   3446 	else if (sc->sc_type == WM_T_82575)
   3447 		size = WM_RAL_TABSIZE_82575;
   3448 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3449 		size = WM_RAL_TABSIZE_82576;
   3450 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3451 		size = WM_RAL_TABSIZE_I350;
   3452 	else
   3453 		size = WM_RAL_TABSIZE;
   3454 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3455 
   3456 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3457 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3458 		switch (i) {
   3459 		case 0:
   3460 			/* We can use all entries */
   3461 			ralmax = size;
   3462 			break;
   3463 		case 1:
   3464 			/* Only RAR[0] */
   3465 			ralmax = 1;
   3466 			break;
   3467 		default:
   3468 			/* available SHRA + RAR[0] */
   3469 			ralmax = i + 1;
   3470 		}
   3471 	} else
   3472 		ralmax = size;
   3473 	for (i = 1; i < size; i++) {
   3474 		if (i < ralmax)
   3475 			wm_set_ral(sc, NULL, i);
   3476 	}
   3477 
   3478 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3479 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3480 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3481 	    || (sc->sc_type == WM_T_PCH_SPT))
   3482 		size = WM_ICH8_MC_TABSIZE;
   3483 	else
   3484 		size = WM_MC_TABSIZE;
   3485 	/* Clear out the multicast table. */
   3486 	for (i = 0; i < size; i++) {
   3487 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3488 		CSR_WRITE_FLUSH(sc);
   3489 	}
   3490 
   3491 	ETHER_LOCK(ec);
   3492 	ETHER_FIRST_MULTI(step, ec, enm);
   3493 	while (enm != NULL) {
   3494 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3495 			ETHER_UNLOCK(ec);
   3496 			/*
   3497 			 * We must listen to a range of multicast addresses.
   3498 			 * For now, just accept all multicasts, rather than
   3499 			 * trying to set only those filter bits needed to match
   3500 			 * the range.  (At this time, the only use of address
   3501 			 * ranges is for IP multicast routing, for which the
   3502 			 * range is big enough to require all bits set.)
   3503 			 */
   3504 			goto allmulti;
   3505 		}
   3506 
   3507 		hash = wm_mchash(sc, enm->enm_addrlo);
   3508 
   3509 		reg = (hash >> 5);
   3510 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3511 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3512 		    || (sc->sc_type == WM_T_PCH2)
   3513 		    || (sc->sc_type == WM_T_PCH_LPT)
   3514 		    || (sc->sc_type == WM_T_PCH_SPT))
   3515 			reg &= 0x1f;
   3516 		else
   3517 			reg &= 0x7f;
   3518 		bit = hash & 0x1f;
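         		/* reg selects the 32-bit MTA word; bit is the bit within it. */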
   3519 
   3520 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3521 		hash |= 1U << bit;
   3522 
   3523 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3524 			/*
   3525 			 * 82544 Errata 9: Certain register cannot be written
   3526 			 * with particular alignments in PCI-X bus operation
   3527 			 * (FCAH, MTA and VFTA).
   3528 			 */
   3529 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3530 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3531 			CSR_WRITE_FLUSH(sc);
   3532 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3533 			CSR_WRITE_FLUSH(sc);
   3534 		} else {
   3535 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3536 			CSR_WRITE_FLUSH(sc);
   3537 		}
   3538 
   3539 		ETHER_NEXT_MULTI(step, enm);
   3540 	}
   3541 	ETHER_UNLOCK(ec);
   3542 
   3543 	ifp->if_flags &= ~IFF_ALLMULTI;
   3544 	goto setit;
   3545 
   3546  allmulti:
   3547 	ifp->if_flags |= IFF_ALLMULTI;
   3548 	sc->sc_rctl |= RCTL_MPE;
   3549 
   3550  setit:
   3551 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3552 }
   3553 
   3554 /* Reset and init related */
   3555 
   3556 static void
   3557 wm_set_vlan(struct wm_softc *sc)
   3558 {
   3559 
   3560 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3561 		device_xname(sc->sc_dev), __func__));
   3562 
   3563 	/* Deal with VLAN enables. */
   3564 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3565 		sc->sc_ctrl |= CTRL_VME;
   3566 	else
   3567 		sc->sc_ctrl &= ~CTRL_VME;
   3568 
   3569 	/* Write the control registers. */
   3570 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3571 }
   3572 
   3573 static void
   3574 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3575 {
   3576 	uint32_t gcr;
   3577 	pcireg_t ctrl2;
   3578 
   3579 	gcr = CSR_READ(sc, WMREG_GCR);
   3580 
   3581 	/* Only take action if timeout value is defaulted to 0 */
   3582 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3583 		goto out;
   3584 
   3585 	if ((gcr & GCR_CAP_VER2) == 0) {
   3586 		gcr |= GCR_CMPL_TMOUT_10MS;
   3587 		goto out;
   3588 	}
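         
         	/*
         	 * Capability-version-2 devices program the timeout through
         	 * the PCIe Device Control 2 register instead of GCR.
         	 */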
   3589 
   3590 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3591 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3592 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3593 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3594 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3595 
   3596 out:
   3597 	/* Disable completion timeout resend */
   3598 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3599 
   3600 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3601 }
   3602 
   3603 void
   3604 wm_get_auto_rd_done(struct wm_softc *sc)
   3605 {
   3606 	int i;
   3607 
   3608 	/* wait for eeprom to reload */
   3609 	switch (sc->sc_type) {
   3610 	case WM_T_82571:
   3611 	case WM_T_82572:
   3612 	case WM_T_82573:
   3613 	case WM_T_82574:
   3614 	case WM_T_82583:
   3615 	case WM_T_82575:
   3616 	case WM_T_82576:
   3617 	case WM_T_82580:
   3618 	case WM_T_I350:
   3619 	case WM_T_I354:
   3620 	case WM_T_I210:
   3621 	case WM_T_I211:
   3622 	case WM_T_80003:
   3623 	case WM_T_ICH8:
   3624 	case WM_T_ICH9:
   3625 		for (i = 0; i < 10; i++) {
   3626 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3627 				break;
   3628 			delay(1000);
   3629 		}
   3630 		if (i == 10) {
   3631 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3632 			    "complete\n", device_xname(sc->sc_dev));
   3633 		}
   3634 		break;
   3635 	default:
   3636 		break;
   3637 	}
   3638 }
   3639 
   3640 void
   3641 wm_lan_init_done(struct wm_softc *sc)
   3642 {
   3643 	uint32_t reg = 0;
   3644 	int i;
   3645 
   3646 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3647 		device_xname(sc->sc_dev), __func__));
   3648 
   3649 	/* Wait for eeprom to reload */
   3650 	switch (sc->sc_type) {
   3651 	case WM_T_ICH10:
   3652 	case WM_T_PCH:
   3653 	case WM_T_PCH2:
   3654 	case WM_T_PCH_LPT:
   3655 	case WM_T_PCH_SPT:
   3656 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3657 			reg = CSR_READ(sc, WMREG_STATUS);
   3658 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3659 				break;
   3660 			delay(100);
   3661 		}
   3662 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3663 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3664 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3665 		}
   3666 		break;
   3667 	default:
   3668 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3669 		    __func__);
   3670 		break;
   3671 	}
   3672 
   3673 	reg &= ~STATUS_LAN_INIT_DONE;
   3674 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3675 }
   3676 
   3677 void
   3678 wm_get_cfg_done(struct wm_softc *sc)
   3679 {
   3680 	int mask;
   3681 	uint32_t reg;
   3682 	int i;
   3683 
   3684 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3685 		device_xname(sc->sc_dev), __func__));
   3686 
   3687 	/* Wait for eeprom to reload */
   3688 	switch (sc->sc_type) {
   3689 	case WM_T_82542_2_0:
   3690 	case WM_T_82542_2_1:
   3691 		/* null */
   3692 		break;
   3693 	case WM_T_82543:
   3694 	case WM_T_82544:
   3695 	case WM_T_82540:
   3696 	case WM_T_82545:
   3697 	case WM_T_82545_3:
   3698 	case WM_T_82546:
   3699 	case WM_T_82546_3:
   3700 	case WM_T_82541:
   3701 	case WM_T_82541_2:
   3702 	case WM_T_82547:
   3703 	case WM_T_82547_2:
   3704 	case WM_T_82573:
   3705 	case WM_T_82574:
   3706 	case WM_T_82583:
   3707 		/* generic */
   3708 		delay(10*1000);
   3709 		break;
   3710 	case WM_T_80003:
   3711 	case WM_T_82571:
   3712 	case WM_T_82572:
   3713 	case WM_T_82575:
   3714 	case WM_T_82576:
   3715 	case WM_T_82580:
   3716 	case WM_T_I350:
   3717 	case WM_T_I354:
   3718 	case WM_T_I210:
   3719 	case WM_T_I211:
   3720 		if (sc->sc_type == WM_T_82571) {
    3721 			/* On 82571, all functions share port 0's bit */
   3722 			mask = EEMNGCTL_CFGDONE_0;
   3723 		} else
   3724 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3725 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3726 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3727 				break;
   3728 			delay(1000);
   3729 		}
   3730 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3731 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3732 				device_xname(sc->sc_dev), __func__));
   3733 		}
   3734 		break;
   3735 	case WM_T_ICH8:
   3736 	case WM_T_ICH9:
   3737 	case WM_T_ICH10:
   3738 	case WM_T_PCH:
   3739 	case WM_T_PCH2:
   3740 	case WM_T_PCH_LPT:
   3741 	case WM_T_PCH_SPT:
   3742 		delay(10*1000);
   3743 		if (sc->sc_type >= WM_T_ICH10)
   3744 			wm_lan_init_done(sc);
   3745 		else
   3746 			wm_get_auto_rd_done(sc);
   3747 
   3748 		reg = CSR_READ(sc, WMREG_STATUS);
   3749 		if ((reg & STATUS_PHYRA) != 0)
   3750 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3751 		break;
   3752 	default:
   3753 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3754 		    __func__);
   3755 		break;
   3756 	}
   3757 }
   3758 
   3759 void
   3760 wm_phy_post_reset(struct wm_softc *sc)
   3761 {
   3762 	uint32_t reg;
   3763 
   3764 	/* This function is only for ICH8 and newer. */
   3765 	if (sc->sc_type < WM_T_ICH8)
   3766 		return;
   3767 
   3768 	if (wm_phy_resetisblocked(sc)) {
   3769 		/* XXX */
   3770 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3771 		return;
   3772 	}
   3773 
   3774 	/* Allow time for h/w to get to quiescent state after reset */
   3775 	delay(10*1000);
   3776 
   3777 	/* Perform any necessary post-reset workarounds */
   3778 	if (sc->sc_type == WM_T_PCH)
   3779 		wm_hv_phy_workaround_ich8lan(sc);
   3780 	if (sc->sc_type == WM_T_PCH2)
   3781 		wm_lv_phy_workaround_ich8lan(sc);
   3782 
   3783 	/* Clear the host wakeup bit after lcd reset */
   3784 	if (sc->sc_type >= WM_T_PCH) {
   3785 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3786 		    BM_PORT_GEN_CFG);
   3787 		reg &= ~BM_WUC_HOST_WU_BIT;
   3788 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3789 		    BM_PORT_GEN_CFG, reg);
   3790 	}
   3791 
   3792 	/* Configure the LCD with the extended configuration region in NVM */
   3793 	wm_init_lcd_from_nvm(sc);
   3794 
   3795 	/* Configure the LCD with the OEM bits in NVM */
   3796 }
   3797 
   3798 /* Only for PCH and newer */
   3799 static void
   3800 wm_write_smbus_addr(struct wm_softc *sc)
   3801 {
   3802 	uint32_t strap, freq;
   3803 	uint32_t phy_data;
   3804 
   3805 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3806 		device_xname(sc->sc_dev), __func__));
   3807 
   3808 	strap = CSR_READ(sc, WMREG_STRAP);
   3809 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3810 
   3811 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3812 
   3813 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3814 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3815 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3816 
   3817 	if (sc->sc_phytype == WMPHY_I217) {
   3818 		/* Restore SMBus frequency */
    3819 		if (freq--) {
   3820 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3821 			    | HV_SMB_ADDR_FREQ_HIGH);
   3822 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3823 			    HV_SMB_ADDR_FREQ_LOW);
   3824 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3825 			    HV_SMB_ADDR_FREQ_HIGH);
   3826 		} else {
   3827 			DPRINTF(WM_DEBUG_INIT,
   3828 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3829 				device_xname(sc->sc_dev), __func__));
   3830 		}
   3831 	}
   3832 
   3833 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3834 }
   3835 
   3836 void
   3837 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3838 {
   3839 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3840 	uint16_t phy_page = 0;
   3841 
   3842 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3843 		device_xname(sc->sc_dev), __func__));
   3844 
   3845 	switch (sc->sc_type) {
   3846 	case WM_T_ICH8:
   3847 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3848 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3849 			return;
   3850 
   3851 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3852 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3853 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3854 			break;
   3855 		}
   3856 		/* FALLTHROUGH */
   3857 	case WM_T_PCH:
   3858 	case WM_T_PCH2:
   3859 	case WM_T_PCH_LPT:
   3860 	case WM_T_PCH_SPT:
   3861 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3862 		break;
   3863 	default:
   3864 		return;
   3865 	}
   3866 
   3867 	sc->phy.acquire(sc);
   3868 
   3869 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3870 	if ((reg & sw_cfg_mask) == 0)
   3871 		goto release;
   3872 
   3873 	/*
   3874 	 * Make sure HW does not configure LCD from PHY extended configuration
   3875 	 * before SW configuration
   3876 	 */
   3877 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3878 	if ((sc->sc_type < WM_T_PCH2)
   3879 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3880 		goto release;
   3881 
   3882 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3883 		device_xname(sc->sc_dev), __func__));
    3884 	/* The pointer counts dwords; shift to get the NVM word address. */
   3885 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3886 
   3887 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3888 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3889 
   3890 	if (((sc->sc_type == WM_T_PCH)
   3891 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3892 	    || (sc->sc_type > WM_T_PCH)) {
   3893 		/*
   3894 		 * HW configures the SMBus address and LEDs when the OEM and
   3895 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3896 		 * are cleared, SW will configure them instead.
   3897 		 */
   3898 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3899 			device_xname(sc->sc_dev), __func__));
   3900 		wm_write_smbus_addr(sc);
   3901 
   3902 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3903 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3904 	}
   3905 
   3906 	/* Configure LCD from extended configuration region. */
   3907 	for (i = 0; i < cnf_size; i++) {
   3908 		uint16_t reg_data, reg_addr;
   3909 
   3910 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3911 			goto release;
   3912 
   3913 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3914 			goto release;
   3915 
   3916 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3917 			phy_page = reg_data;
   3918 
   3919 		reg_addr &= IGPHY_MAXREGADDR;
   3920 		reg_addr |= phy_page;
   3921 
   3922 		sc->phy.release(sc); /* XXX */
   3923 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3924 		sc->phy.acquire(sc); /* XXX */
   3925 	}
   3926 
   3927 release:
   3928 	sc->phy.release(sc);
   3929 	return;
   3930 }
   3931 
   3932 
   3933 /* Init hardware bits */
   3934 void
   3935 wm_initialize_hardware_bits(struct wm_softc *sc)
   3936 {
   3937 	uint32_t tarc0, tarc1, reg;
   3938 
   3939 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3940 		device_xname(sc->sc_dev), __func__));
   3941 
   3942 	/* For 82571 variant, 80003 and ICHs */
   3943 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3944 	    || (sc->sc_type >= WM_T_80003)) {
   3945 
   3946 		/* Transmit Descriptor Control 0 */
   3947 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3948 		reg |= TXDCTL_COUNT_DESC;
   3949 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3950 
   3951 		/* Transmit Descriptor Control 1 */
   3952 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3953 		reg |= TXDCTL_COUNT_DESC;
   3954 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3955 
   3956 		/* TARC0 */
   3957 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3958 		switch (sc->sc_type) {
   3959 		case WM_T_82571:
   3960 		case WM_T_82572:
   3961 		case WM_T_82573:
   3962 		case WM_T_82574:
   3963 		case WM_T_82583:
   3964 		case WM_T_80003:
   3965 			/* Clear bits 30..27 */
   3966 			tarc0 &= ~__BITS(30, 27);
   3967 			break;
   3968 		default:
   3969 			break;
   3970 		}
   3971 
   3972 		switch (sc->sc_type) {
   3973 		case WM_T_82571:
   3974 		case WM_T_82572:
   3975 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3976 
   3977 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3978 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3979 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3980 			/* 8257[12] Errata No.7 */
    3981 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3982 
   3983 			/* TARC1 bit 28 */
   3984 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3985 				tarc1 &= ~__BIT(28);
   3986 			else
   3987 				tarc1 |= __BIT(28);
   3988 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3989 
   3990 			/*
   3991 			 * 8257[12] Errata No.13
    3992 			 * Disable Dynamic Clock Gating.
   3993 			 */
   3994 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3995 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3996 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3997 			break;
   3998 		case WM_T_82573:
   3999 		case WM_T_82574:
   4000 		case WM_T_82583:
   4001 			if ((sc->sc_type == WM_T_82574)
   4002 			    || (sc->sc_type == WM_T_82583))
   4003 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4004 
   4005 			/* Extended Device Control */
   4006 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4007 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4008 			reg |= __BIT(22);	/* Set bit 22 */
   4009 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4010 
   4011 			/* Device Control */
   4012 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4013 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4014 
   4015 			/* PCIe Control Register */
   4016 			/*
   4017 			 * 82573 Errata (unknown).
   4018 			 *
   4019 			 * 82574 Errata 25 and 82583 Errata 12
   4020 			 * "Dropped Rx Packets":
    4021 			 *   NVM image version 2.1.4 and newer does not have this bug.
   4022 			 */
   4023 			reg = CSR_READ(sc, WMREG_GCR);
   4024 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4025 			CSR_WRITE(sc, WMREG_GCR, reg);
   4026 
   4027 			if ((sc->sc_type == WM_T_82574)
   4028 			    || (sc->sc_type == WM_T_82583)) {
   4029 				/*
   4030 				 * Document says this bit must be set for
   4031 				 * proper operation.
   4032 				 */
   4033 				reg = CSR_READ(sc, WMREG_GCR);
   4034 				reg |= __BIT(22);
   4035 				CSR_WRITE(sc, WMREG_GCR, reg);
   4036 
   4037 				/*
    4038 				 * Apply a workaround for a hardware erratum
    4039 				 * documented in the errata docs. It fixes an
    4040 				 * issue where some error-prone or unreliable
    4041 				 * PCIe completions occur, particularly with
    4042 				 * ASPM enabled. Without the fix, the issue
    4043 				 * can cause Tx timeouts.
   4044 				 */
   4045 				reg = CSR_READ(sc, WMREG_GCR2);
   4046 				reg |= __BIT(0);
   4047 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4048 			}
   4049 			break;
   4050 		case WM_T_80003:
   4051 			/* TARC0 */
   4052 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4053 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4054 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4055 
   4056 			/* TARC1 bit 28 */
   4057 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4058 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4059 				tarc1 &= ~__BIT(28);
   4060 			else
   4061 				tarc1 |= __BIT(28);
   4062 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4063 			break;
   4064 		case WM_T_ICH8:
   4065 		case WM_T_ICH9:
   4066 		case WM_T_ICH10:
   4067 		case WM_T_PCH:
   4068 		case WM_T_PCH2:
   4069 		case WM_T_PCH_LPT:
   4070 		case WM_T_PCH_SPT:
   4071 			/* TARC0 */
   4072 			if (sc->sc_type == WM_T_ICH8) {
   4073 				/* Set TARC0 bits 29 and 28 */
   4074 				tarc0 |= __BITS(29, 28);
   4075 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4076 				tarc0 |= __BIT(29);
   4077 				/*
   4078 				 *  Drop bit 28. From Linux.
   4079 				 * See I218/I219 spec update
   4080 				 * "5. Buffer Overrun While the I219 is
   4081 				 * Processing DMA Transactions"
   4082 				 */
   4083 				tarc0 &= ~__BIT(28);
   4084 			}
   4085 			/* Set TARC0 bits 23,24,26,27 */
   4086 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4087 
   4088 			/* CTRL_EXT */
   4089 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4090 			reg |= __BIT(22);	/* Set bit 22 */
   4091 			/*
   4092 			 * Enable PHY low-power state when MAC is at D3
   4093 			 * w/o WoL
   4094 			 */
   4095 			if (sc->sc_type >= WM_T_PCH)
   4096 				reg |= CTRL_EXT_PHYPDEN;
   4097 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4098 
   4099 			/* TARC1 */
   4100 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4101 			/* bit 28 */
   4102 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4103 				tarc1 &= ~__BIT(28);
   4104 			else
   4105 				tarc1 |= __BIT(28);
   4106 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4107 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4108 
   4109 			/* Device Status */
   4110 			if (sc->sc_type == WM_T_ICH8) {
   4111 				reg = CSR_READ(sc, WMREG_STATUS);
   4112 				reg &= ~__BIT(31);
   4113 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4114 
   4115 			}
   4116 
   4117 			/* IOSFPC */
   4118 			if (sc->sc_type == WM_T_PCH_SPT) {
   4119 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4120 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4121 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4122 			}
   4123 			/*
    4124 			 * To work around a descriptor data corruption issue
    4125 			 * seen with NFS v2 UDP traffic, simply disable the
    4126 			 * NFS filtering capability.
   4127 			 */
   4128 			reg = CSR_READ(sc, WMREG_RFCTL);
   4129 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4130 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4131 			break;
   4132 		default:
   4133 			break;
   4134 		}
   4135 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4136 
   4137 		switch (sc->sc_type) {
   4138 		/*
   4139 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4140 		 * Avoid RSS Hash Value bug.
   4141 		 */
   4142 		case WM_T_82571:
   4143 		case WM_T_82572:
   4144 		case WM_T_82573:
   4145 		case WM_T_80003:
   4146 		case WM_T_ICH8:
   4147 			reg = CSR_READ(sc, WMREG_RFCTL);
   4148 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4149 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4150 			break;
   4151 		case WM_T_82574:
    4152 			/* Use extended Rx descriptors. */
   4153 			reg = CSR_READ(sc, WMREG_RFCTL);
   4154 			reg |= WMREG_RFCTL_EXSTEN;
   4155 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4156 			break;
   4157 		default:
   4158 			break;
   4159 		}
   4160 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4161 		/*
   4162 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4163 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4164 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4165 		 * Correctly by the Device"
   4166 		 *
   4167 		 * I354(C2000) Errata AVR53:
   4168 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4169 		 * Hang"
   4170 		 */
   4171 		reg = CSR_READ(sc, WMREG_RFCTL);
   4172 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4173 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4174 	}
   4175 }
   4176 
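         /*
          * Translate the RXPBS register field through the 82580 packet buffer
          * size table; out-of-range values map to 0.
          */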
   4177 static uint32_t
   4178 wm_rxpbs_adjust_82580(uint32_t val)
   4179 {
   4180 	uint32_t rv = 0;
   4181 
   4182 	if (val < __arraycount(wm_82580_rxpbs_table))
   4183 		rv = wm_82580_rxpbs_table[val];
   4184 
   4185 	return rv;
   4186 }
   4187 
   4188 /*
   4189  * wm_reset_phy:
   4190  *
   4191  *	generic PHY reset function.
   4192  *	Same as e1000_phy_hw_reset_generic()
   4193  */
   4194 static void
   4195 wm_reset_phy(struct wm_softc *sc)
   4196 {
   4197 	uint32_t reg;
   4198 
   4199 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4200 		device_xname(sc->sc_dev), __func__));
   4201 	if (wm_phy_resetisblocked(sc))
   4202 		return;
   4203 
   4204 	sc->phy.acquire(sc);
   4205 
   4206 	reg = CSR_READ(sc, WMREG_CTRL);
   4207 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4208 	CSR_WRITE_FLUSH(sc);
   4209 
   4210 	delay(sc->phy.reset_delay_us);
   4211 
   4212 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4213 	CSR_WRITE_FLUSH(sc);
   4214 
   4215 	delay(150);
   4216 
   4217 	sc->phy.release(sc);
   4218 
   4219 	wm_get_cfg_done(sc);
   4220 	wm_phy_post_reset(sc);
   4221 }
   4222 
   4223 /*
    4224  * Only used by WM_T_PCH_SPT, which does not use multiqueue, so it
    4225  * is sufficient to check sc->sc_queue[0] only.
   4226  */
   4227 static void
   4228 wm_flush_desc_rings(struct wm_softc *sc)
   4229 {
   4230 	pcireg_t preg;
   4231 	uint32_t reg;
   4232 	struct wm_txqueue *txq;
   4233 	wiseman_txdesc_t *txd;
   4234 	int nexttx;
   4235 	uint32_t rctl;
   4236 
   4237 	/* First, disable MULR fix in FEXTNVM11 */
   4238 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4239 	reg |= FEXTNVM11_DIS_MULRFIX;
   4240 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4241 
   4242 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4243 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4244 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4245 		return;
   4246 
   4247 	/* TX */
   4248 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4249 	    device_xname(sc->sc_dev), preg, reg);
   4250 	reg = CSR_READ(sc, WMREG_TCTL);
   4251 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4252 
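         	/*
         	 * Post a single dummy 512-byte descriptor so the pending
         	 * flush request can complete.
         	 */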
   4253 	txq = &sc->sc_queue[0].wmq_txq;
   4254 	nexttx = txq->txq_next;
   4255 	txd = &txq->txq_descs[nexttx];
   4256 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    4257 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4258 	txd->wtx_fields.wtxu_status = 0;
   4259 	txd->wtx_fields.wtxu_options = 0;
   4260 	txd->wtx_fields.wtxu_vlan = 0;
   4261 
   4262 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4263 	    BUS_SPACE_BARRIER_WRITE);
   4264 
   4265 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4266 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4267 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4268 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4269 	delay(250);
   4270 
   4271 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4272 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4273 		return;
   4274 
   4275 	/* RX */
   4276 	printf("%s: Need RX flush (reg = %08x)\n",
   4277 	    device_xname(sc->sc_dev), preg);
   4278 	rctl = CSR_READ(sc, WMREG_RCTL);
   4279 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4280 	CSR_WRITE_FLUSH(sc);
   4281 	delay(150);
   4282 
   4283 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4284 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4285 	reg &= 0xffffc000;
   4286 	/*
   4287 	 * update thresholds: prefetch threshold to 31, host threshold
   4288 	 * to 1 and make sure the granularity is "descriptors" and not
   4289 	 * "cache lines"
   4290 	 */
   4291 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4292 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4293 
   4294 	/*
   4295 	 * momentarily enable the RX ring for the changes to take
   4296 	 * effect
   4297 	 */
   4298 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4299 	CSR_WRITE_FLUSH(sc);
   4300 	delay(150);
   4301 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4302 }
   4303 
   4304 /*
   4305  * wm_reset:
   4306  *
   4307  *	Reset the i82542 chip.
   4308  */
   4309 static void
   4310 wm_reset(struct wm_softc *sc)
   4311 {
   4312 	int phy_reset = 0;
   4313 	int i, error = 0;
   4314 	uint32_t reg;
   4315 	uint16_t kmreg;
   4316 	int rv;
   4317 
   4318 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4319 		device_xname(sc->sc_dev), __func__));
   4320 	KASSERT(sc->sc_type != 0);
   4321 
   4322 	/*
   4323 	 * Allocate on-chip memory according to the MTU size.
   4324 	 * The Packet Buffer Allocation register must be written
   4325 	 * before the chip is reset.
   4326 	 */
   4327 	switch (sc->sc_type) {
   4328 	case WM_T_82547:
   4329 	case WM_T_82547_2:
   4330 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4331 		    PBA_22K : PBA_30K;
   4332 		for (i = 0; i < sc->sc_nqueues; i++) {
   4333 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4334 			txq->txq_fifo_head = 0;
   4335 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4336 			txq->txq_fifo_size =
   4337 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4338 			txq->txq_fifo_stall = 0;
   4339 		}
   4340 		break;
   4341 	case WM_T_82571:
   4342 	case WM_T_82572:
   4343 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4344 	case WM_T_80003:
   4345 		sc->sc_pba = PBA_32K;
   4346 		break;
   4347 	case WM_T_82573:
   4348 		sc->sc_pba = PBA_12K;
   4349 		break;
   4350 	case WM_T_82574:
   4351 	case WM_T_82583:
   4352 		sc->sc_pba = PBA_20K;
   4353 		break;
   4354 	case WM_T_82576:
   4355 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4356 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4357 		break;
   4358 	case WM_T_82580:
   4359 	case WM_T_I350:
   4360 	case WM_T_I354:
   4361 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4362 		break;
   4363 	case WM_T_I210:
   4364 	case WM_T_I211:
   4365 		sc->sc_pba = PBA_34K;
   4366 		break;
   4367 	case WM_T_ICH8:
   4368 		/* Workaround for a bit corruption issue in FIFO memory */
   4369 		sc->sc_pba = PBA_8K;
   4370 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4371 		break;
   4372 	case WM_T_ICH9:
   4373 	case WM_T_ICH10:
   4374 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4375 		    PBA_14K : PBA_10K;
   4376 		break;
   4377 	case WM_T_PCH:
   4378 	case WM_T_PCH2:
   4379 	case WM_T_PCH_LPT:
   4380 	case WM_T_PCH_SPT:
   4381 		sc->sc_pba = PBA_26K;
   4382 		break;
   4383 	default:
   4384 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4385 		    PBA_40K : PBA_48K;
   4386 		break;
   4387 	}
   4388 	/*
    4389 	 * Only old or non-multiqueue devices have the PBA register.
   4390 	 * XXX Need special handling for 82575.
   4391 	 */
   4392 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4393 	    || (sc->sc_type == WM_T_82575))
   4394 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4395 
   4396 	/* Prevent the PCI-E bus from sticking */
   4397 	if (sc->sc_flags & WM_F_PCIE) {
   4398 		int timeout = 800;
   4399 
   4400 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4401 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4402 
   4403 		while (timeout--) {
   4404 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4405 			    == 0)
   4406 				break;
   4407 			delay(100);
   4408 		}
   4409 		if (timeout == 0)
   4410 			device_printf(sc->sc_dev,
   4411 			    "failed to disable busmastering\n");
   4412 	}
   4413 
   4414 	/* Set the completion timeout for interface */
   4415 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4416 	    || (sc->sc_type == WM_T_82580)
   4417 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4418 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4419 		wm_set_pcie_completion_timeout(sc);
   4420 
   4421 	/* Clear interrupt */
   4422 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4423 	if (wm_is_using_msix(sc)) {
   4424 		if (sc->sc_type != WM_T_82574) {
   4425 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4426 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4427 		} else {
   4428 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4429 		}
   4430 	}
   4431 
   4432 	/* Stop the transmit and receive processes. */
   4433 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4434 	sc->sc_rctl &= ~RCTL_EN;
   4435 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4436 	CSR_WRITE_FLUSH(sc);
   4437 
   4438 	/* XXX set_tbi_sbp_82543() */
   4439 
   4440 	delay(10*1000);
   4441 
   4442 	/* Must acquire the MDIO ownership before MAC reset */
   4443 	switch (sc->sc_type) {
   4444 	case WM_T_82573:
   4445 	case WM_T_82574:
   4446 	case WM_T_82583:
   4447 		error = wm_get_hw_semaphore_82573(sc);
   4448 		break;
   4449 	default:
   4450 		break;
   4451 	}
   4452 
   4453 	/*
   4454 	 * 82541 Errata 29? & 82547 Errata 28?
   4455 	 * See also the description about PHY_RST bit in CTRL register
   4456 	 * in 8254x_GBe_SDM.pdf.
   4457 	 */
   4458 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4459 		CSR_WRITE(sc, WMREG_CTRL,
   4460 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4461 		CSR_WRITE_FLUSH(sc);
   4462 		delay(5000);
   4463 	}
   4464 
   4465 	switch (sc->sc_type) {
   4466 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4467 	case WM_T_82541:
   4468 	case WM_T_82541_2:
   4469 	case WM_T_82547:
   4470 	case WM_T_82547_2:
   4471 		/*
   4472 		 * On some chipsets, a reset through a memory-mapped write
   4473 		 * cycle can cause the chip to reset before completing the
   4474 		 * write cycle.  This causes major headache that can be
   4475 		 * avoided by issuing the reset via indirect register writes
   4476 		 * through I/O space.
   4477 		 *
   4478 		 * So, if we successfully mapped the I/O BAR at attach time,
   4479 		 * use that.  Otherwise, try our luck with a memory-mapped
   4480 		 * reset.
   4481 		 */
   4482 		if (sc->sc_flags & WM_F_IOH_VALID)
   4483 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4484 		else
   4485 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4486 		break;
   4487 	case WM_T_82545_3:
   4488 	case WM_T_82546_3:
   4489 		/* Use the shadow control register on these chips. */
   4490 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4491 		break;
   4492 	case WM_T_80003:
   4493 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4494 		sc->phy.acquire(sc);
   4495 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4496 		sc->phy.release(sc);
   4497 		break;
   4498 	case WM_T_ICH8:
   4499 	case WM_T_ICH9:
   4500 	case WM_T_ICH10:
   4501 	case WM_T_PCH:
   4502 	case WM_T_PCH2:
   4503 	case WM_T_PCH_LPT:
   4504 	case WM_T_PCH_SPT:
   4505 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4506 		if (wm_phy_resetisblocked(sc) == false) {
   4507 			/*
   4508 			 * Gate automatic PHY configuration by hardware on
   4509 			 * non-managed 82579
   4510 			 */
   4511 			if ((sc->sc_type == WM_T_PCH2)
   4512 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4513 				== 0))
   4514 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4515 
   4516 			reg |= CTRL_PHY_RESET;
   4517 			phy_reset = 1;
   4518 		} else
   4519 			printf("XXX reset is blocked!!!\n");
   4520 		sc->phy.acquire(sc);
   4521 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4522 		/* Don't insert a completion barrier while resetting */
   4523 		delay(20*1000);
   4524 		mutex_exit(sc->sc_ich_phymtx);
   4525 		break;
   4526 	case WM_T_82580:
   4527 	case WM_T_I350:
   4528 	case WM_T_I354:
   4529 	case WM_T_I210:
   4530 	case WM_T_I211:
   4531 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4532 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4533 			CSR_WRITE_FLUSH(sc);
   4534 		delay(5000);
   4535 		break;
   4536 	case WM_T_82542_2_0:
   4537 	case WM_T_82542_2_1:
   4538 	case WM_T_82543:
   4539 	case WM_T_82540:
   4540 	case WM_T_82545:
   4541 	case WM_T_82546:
   4542 	case WM_T_82571:
   4543 	case WM_T_82572:
   4544 	case WM_T_82573:
   4545 	case WM_T_82574:
   4546 	case WM_T_82575:
   4547 	case WM_T_82576:
   4548 	case WM_T_82583:
   4549 	default:
   4550 		/* Everything else can safely use the documented method. */
   4551 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4552 		break;
   4553 	}
   4554 
   4555 	/* Must release the MDIO ownership after MAC reset */
   4556 	switch (sc->sc_type) {
   4557 	case WM_T_82573:
   4558 	case WM_T_82574:
   4559 	case WM_T_82583:
   4560 		if (error == 0)
   4561 			wm_put_hw_semaphore_82573(sc);
   4562 		break;
   4563 	default:
   4564 		break;
   4565 	}
   4566 
   4567 	if (phy_reset != 0)
   4568 		wm_get_cfg_done(sc);
   4569 
   4570 	/* reload EEPROM */
   4571 	switch (sc->sc_type) {
   4572 	case WM_T_82542_2_0:
   4573 	case WM_T_82542_2_1:
   4574 	case WM_T_82543:
   4575 	case WM_T_82544:
   4576 		delay(10);
   4577 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4578 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4579 		CSR_WRITE_FLUSH(sc);
   4580 		delay(2000);
   4581 		break;
   4582 	case WM_T_82540:
   4583 	case WM_T_82545:
   4584 	case WM_T_82545_3:
   4585 	case WM_T_82546:
   4586 	case WM_T_82546_3:
   4587 		delay(5*1000);
   4588 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4589 		break;
   4590 	case WM_T_82541:
   4591 	case WM_T_82541_2:
   4592 	case WM_T_82547:
   4593 	case WM_T_82547_2:
   4594 		delay(20000);
   4595 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4596 		break;
   4597 	case WM_T_82571:
   4598 	case WM_T_82572:
   4599 	case WM_T_82573:
   4600 	case WM_T_82574:
   4601 	case WM_T_82583:
   4602 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4603 			delay(10);
   4604 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4605 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4606 			CSR_WRITE_FLUSH(sc);
   4607 		}
   4608 		/* check EECD_EE_AUTORD */
   4609 		wm_get_auto_rd_done(sc);
   4610 		/*
    4611 		 * PHY configuration from NVM starts only after EECD_AUTO_RD
    4612 		 * is set.
   4613 		 */
   4614 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4615 		    || (sc->sc_type == WM_T_82583))
   4616 			delay(25*1000);
   4617 		break;
   4618 	case WM_T_82575:
   4619 	case WM_T_82576:
   4620 	case WM_T_82580:
   4621 	case WM_T_I350:
   4622 	case WM_T_I354:
   4623 	case WM_T_I210:
   4624 	case WM_T_I211:
   4625 	case WM_T_80003:
   4626 		/* check EECD_EE_AUTORD */
   4627 		wm_get_auto_rd_done(sc);
   4628 		break;
   4629 	case WM_T_ICH8:
   4630 	case WM_T_ICH9:
   4631 	case WM_T_ICH10:
   4632 	case WM_T_PCH:
   4633 	case WM_T_PCH2:
   4634 	case WM_T_PCH_LPT:
   4635 	case WM_T_PCH_SPT:
   4636 		break;
   4637 	default:
   4638 		panic("%s: unknown type\n", __func__);
   4639 	}
   4640 
   4641 	/* Check whether EEPROM is present or not */
   4642 	switch (sc->sc_type) {
   4643 	case WM_T_82575:
   4644 	case WM_T_82576:
   4645 	case WM_T_82580:
   4646 	case WM_T_I350:
   4647 	case WM_T_I354:
   4648 	case WM_T_ICH8:
   4649 	case WM_T_ICH9:
   4650 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4651 			/* Not found */
   4652 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4653 			if (sc->sc_type == WM_T_82575)
   4654 				wm_reset_init_script_82575(sc);
   4655 		}
   4656 		break;
   4657 	default:
   4658 		break;
   4659 	}
   4660 
   4661 	if (phy_reset != 0)
   4662 		wm_phy_post_reset(sc);
   4663 
   4664 	if ((sc->sc_type == WM_T_82580)
   4665 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4666 		/* clear global device reset status bit */
   4667 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4668 	}
   4669 
   4670 	/* Clear any pending interrupt events. */
   4671 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4672 	reg = CSR_READ(sc, WMREG_ICR);
   4673 	if (wm_is_using_msix(sc)) {
   4674 		if (sc->sc_type != WM_T_82574) {
   4675 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4676 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4677 		} else
   4678 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4679 	}
   4680 
   4681 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4682 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4683 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4684 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4685 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4686 		reg |= KABGTXD_BGSQLBIAS;
   4687 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4688 	}
   4689 
   4690 	/* reload sc_ctrl */
   4691 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4692 
   4693 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4694 		wm_set_eee_i350(sc);
   4695 
   4696 	/*
   4697 	 * For PCH, this write will make sure that any noise will be detected
   4698 	 * as a CRC error and be dropped rather than show up as a bad packet
   4699 	 * to the DMA engine
    4700 	 * to the DMA engine.
   4701 	if (sc->sc_type == WM_T_PCH)
   4702 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4703 
   4704 	if (sc->sc_type >= WM_T_82544)
   4705 		CSR_WRITE(sc, WMREG_WUC, 0);
   4706 
   4707 	wm_reset_mdicnfg_82580(sc);
   4708 
   4709 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4710 		wm_pll_workaround_i210(sc);
   4711 
   4712 	if (sc->sc_type == WM_T_80003) {
   4713 		/* default to TRUE to enable the MDIC W/A */
   4714 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4715 
   4716 		rv = wm_kmrn_readreg(sc,
   4717 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4718 		if (rv == 0) {
   4719 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4720 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4721 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4722 			else
   4723 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4724 		}
   4725 	}
   4726 }
   4727 
   4728 /*
   4729  * wm_add_rxbuf:
   4730  *
    4731  *	Add a receive buffer to the indicated descriptor.
   4732  */
   4733 static int
   4734 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4735 {
   4736 	struct wm_softc *sc = rxq->rxq_sc;
   4737 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4738 	struct mbuf *m;
   4739 	int error;
   4740 
   4741 	KASSERT(mutex_owned(rxq->rxq_lock));
   4742 
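         	/*
         	 * Allocate an mbuf header and then attach a cluster to it; if
         	 * the cluster allocation fails, the header alone cannot hold a
         	 * full-sized receive buffer, so free it and return ENOBUFS.
         	 */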
   4743 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4744 	if (m == NULL)
   4745 		return ENOBUFS;
   4746 
   4747 	MCLGET(m, M_DONTWAIT);
   4748 	if ((m->m_flags & M_EXT) == 0) {
   4749 		m_freem(m);
   4750 		return ENOBUFS;
   4751 	}
   4752 
   4753 	if (rxs->rxs_mbuf != NULL)
   4754 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4755 
   4756 	rxs->rxs_mbuf = m;
   4757 
   4758 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4759 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4760 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4761 	if (error) {
   4762 		/* XXX XXX XXX */
   4763 		aprint_error_dev(sc->sc_dev,
   4764 		    "unable to load rx DMA map %d, error = %d\n",
   4765 		    idx, error);
   4766 		panic("wm_add_rxbuf");
   4767 	}
   4768 
   4769 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4770 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4771 
   4772 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4773 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4774 			wm_init_rxdesc(rxq, idx);
   4775 	} else
   4776 		wm_init_rxdesc(rxq, idx);
   4777 
   4778 	return 0;
   4779 }
   4780 
   4781 /*
   4782  * wm_rxdrain:
   4783  *
   4784  *	Drain the receive queue.
   4785  */
   4786 static void
   4787 wm_rxdrain(struct wm_rxqueue *rxq)
   4788 {
   4789 	struct wm_softc *sc = rxq->rxq_sc;
   4790 	struct wm_rxsoft *rxs;
   4791 	int i;
   4792 
   4793 	KASSERT(mutex_owned(rxq->rxq_lock));
   4794 
   4795 	for (i = 0; i < WM_NRXDESC; i++) {
   4796 		rxs = &rxq->rxq_soft[i];
   4797 		if (rxs->rxs_mbuf != NULL) {
   4798 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4799 			m_freem(rxs->rxs_mbuf);
   4800 			rxs->rxs_mbuf = NULL;
   4801 		}
   4802 	}
   4803 }
   4804 
   4805 
   4806 /*
   4807  * XXX copy from FreeBSD's sys/net/rss_config.c
   4808  */
   4809 /*
   4810  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4811  * effectiveness may be limited by algorithm choice and available entropy
   4812  * during the boot.
   4813  *
   4814  * XXXRW: And that we don't randomize it yet!
   4815  *
   4816  * This is the default Microsoft RSS specification key which is also
   4817  * the Chelsio T5 firmware default key.
   4818  */
   4819 #define RSS_KEYSIZE 40
   4820 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4821 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4822 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4823 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4824 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4825 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4826 };
   4827 
   4828 /*
    4829  * Caller must pass an array of size sizeof(wm_rss_key).
    4830  *
    4831  * XXX
    4832  * As if_ixgbe may also use this function, it should not be an
    4833  * if_wm-specific function.
   4834  */
   4835 static void
   4836 wm_rss_getkey(uint8_t *key)
   4837 {
   4838 
   4839 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4840 }
   4841 
   4842 /*
    4843  * Set up registers for RSS.
    4844  *
    4845  * XXX VMDq is not yet supported.
   4846  */
   4847 static void
   4848 wm_init_rss(struct wm_softc *sc)
   4849 {
   4850 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4851 	int i;
   4852 
   4853 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4854 
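         	/*
         	 * The loop below spreads the redirection-table entries across the
         	 * active queues round-robin: entry i maps to queue (i % sc_nqueues),
         	 * so with e.g. 4 queues the table reads 0, 1, 2, 3, 0, 1, ...
         	 */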
   4855 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4856 		int qid, reta_ent;
   4857 
   4858 		qid  = i % sc->sc_nqueues;
    4859 		switch (sc->sc_type) {
   4860 		case WM_T_82574:
   4861 			reta_ent = __SHIFTIN(qid,
   4862 			    RETA_ENT_QINDEX_MASK_82574);
   4863 			break;
   4864 		case WM_T_82575:
   4865 			reta_ent = __SHIFTIN(qid,
   4866 			    RETA_ENT_QINDEX1_MASK_82575);
   4867 			break;
   4868 		default:
   4869 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4870 			break;
   4871 		}
   4872 
   4873 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4874 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4875 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4876 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4877 	}
   4878 
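         	/* Load the 40-byte key, 4 key bytes per 32-bit RSSRK register. */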
   4879 	wm_rss_getkey((uint8_t *)rss_key);
   4880 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4881 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4882 
   4883 	if (sc->sc_type == WM_T_82574)
   4884 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4885 	else
   4886 		mrqc = MRQC_ENABLE_RSS_MQ;
   4887 
   4888 	/*
   4889 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4890 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4891 	 */
   4892 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4893 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4894 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4895 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4896 
   4897 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4898 }
   4899 
   4900 /*
    4901  * Adjust the TX and RX queue numbers which the system actually uses.
    4902  *
    4903  * The numbers are affected by the following parameters:
    4904  *     - The number of hardware queues
   4905  *     - The number of MSI-X vectors (= "nvectors" argument)
   4906  *     - ncpu
   4907  */
   4908 static void
   4909 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4910 {
   4911 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4912 
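         	/*
         	 * For example (an illustrative sketch): on an 82576 (16 hardware
         	 * queue pairs) attached with nvectors = 5 and ncpu = 2, the logic
         	 * below yields min(16, nvectors - 1, ncpu) = 2 queues.
         	 */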
   4913 	if (nvectors < 2) {
   4914 		sc->sc_nqueues = 1;
   4915 		return;
   4916 	}
   4917 
    4918 	switch (sc->sc_type) {
   4919 	case WM_T_82572:
   4920 		hw_ntxqueues = 2;
   4921 		hw_nrxqueues = 2;
   4922 		break;
   4923 	case WM_T_82574:
   4924 		hw_ntxqueues = 2;
   4925 		hw_nrxqueues = 2;
   4926 		break;
   4927 	case WM_T_82575:
   4928 		hw_ntxqueues = 4;
   4929 		hw_nrxqueues = 4;
   4930 		break;
   4931 	case WM_T_82576:
   4932 		hw_ntxqueues = 16;
   4933 		hw_nrxqueues = 16;
   4934 		break;
   4935 	case WM_T_82580:
   4936 	case WM_T_I350:
   4937 	case WM_T_I354:
   4938 		hw_ntxqueues = 8;
   4939 		hw_nrxqueues = 8;
   4940 		break;
   4941 	case WM_T_I210:
   4942 		hw_ntxqueues = 4;
   4943 		hw_nrxqueues = 4;
   4944 		break;
   4945 	case WM_T_I211:
   4946 		hw_ntxqueues = 2;
   4947 		hw_nrxqueues = 2;
   4948 		break;
   4949 		/*
    4950 		 * As the ethernet controllers below do not support MSI-X,
    4951 		 * this driver does not use multiqueue on them.
   4952 		 *     - WM_T_80003
   4953 		 *     - WM_T_ICH8
   4954 		 *     - WM_T_ICH9
   4955 		 *     - WM_T_ICH10
   4956 		 *     - WM_T_PCH
   4957 		 *     - WM_T_PCH2
   4958 		 *     - WM_T_PCH_LPT
   4959 		 */
   4960 	default:
   4961 		hw_ntxqueues = 1;
   4962 		hw_nrxqueues = 1;
   4963 		break;
   4964 	}
   4965 
   4966 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4967 
   4968 	/*
    4969 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    4970 	 * the number of queues actually used.
   4971 	 */
   4972 	if (nvectors < hw_nqueues + 1) {
   4973 		sc->sc_nqueues = nvectors - 1;
   4974 	} else {
   4975 		sc->sc_nqueues = hw_nqueues;
   4976 	}
   4977 
   4978 	/*
    4979 	 * As more queues than CPUs cannot improve scaling, we limit
    4980 	 * the number of queues actually used.
   4981 	 */
   4982 	if (ncpu < sc->sc_nqueues)
   4983 		sc->sc_nqueues = ncpu;
   4984 }
   4985 
   4986 static inline bool
   4987 wm_is_using_msix(struct wm_softc *sc)
   4988 {
   4989 
   4990 	return (sc->sc_nintrs > 1);
   4991 }
   4992 
   4993 static inline bool
   4994 wm_is_using_multiqueue(struct wm_softc *sc)
   4995 {
   4996 
   4997 	return (sc->sc_nqueues > 1);
   4998 }
   4999 
   5000 static int
   5001 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5002 {
   5003 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5004 	wmq->wmq_id = qidx;
   5005 	wmq->wmq_intr_idx = intr_idx;
   5006 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5007 #ifdef WM_MPSAFE
   5008 	    | SOFTINT_MPSAFE
   5009 #endif
   5010 	    , wm_handle_queue, wmq);
   5011 	if (wmq->wmq_si != NULL)
   5012 		return 0;
   5013 
   5014 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5015 	    wmq->wmq_id);
   5016 
   5017 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5018 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5019 	return ENOMEM;
   5020 }
   5021 
   5022 /*
    5023  * Both single-interrupt MSI and INTx can use this function.
   5024  */
   5025 static int
   5026 wm_setup_legacy(struct wm_softc *sc)
   5027 {
   5028 	pci_chipset_tag_t pc = sc->sc_pc;
   5029 	const char *intrstr = NULL;
   5030 	char intrbuf[PCI_INTRSTR_LEN];
   5031 	int error;
   5032 
   5033 	error = wm_alloc_txrx_queues(sc);
   5034 	if (error) {
   5035 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5036 		    error);
   5037 		return ENOMEM;
   5038 	}
   5039 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5040 	    sizeof(intrbuf));
   5041 #ifdef WM_MPSAFE
   5042 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5043 #endif
   5044 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5045 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5046 	if (sc->sc_ihs[0] == NULL) {
   5047 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5048 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5049 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5050 		return ENOMEM;
   5051 	}
   5052 
   5053 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5054 	sc->sc_nintrs = 1;
   5055 
   5056 	return wm_softint_establish(sc, 0, 0);
   5057 }
   5058 
   5059 static int
   5060 wm_setup_msix(struct wm_softc *sc)
   5061 {
   5062 	void *vih;
   5063 	kcpuset_t *affinity;
   5064 	int qidx, error, intr_idx, txrx_established;
   5065 	pci_chipset_tag_t pc = sc->sc_pc;
   5066 	const char *intrstr = NULL;
   5067 	char intrbuf[PCI_INTRSTR_LEN];
   5068 	char intr_xname[INTRDEVNAMEBUF];
   5069 
   5070 	if (sc->sc_nqueues < ncpu) {
   5071 		/*
    5072 		 * To avoid other devices' interrupts, the affinity of the
    5073 		 * Tx/Rx interrupts starts from CPU#1.
   5074 		 */
   5075 		sc->sc_affinity_offset = 1;
   5076 	} else {
   5077 		/*
    5078 		 * In this case, this device uses all CPUs, so for readability
    5079 		 * we unify the affinity cpu_index with the MSI-X vector number.
   5080 		 */
   5081 		sc->sc_affinity_offset = 0;
   5082 	}
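         	/*
         	 * With the offset applied, queue i's interrupt lands on CPU
         	 * (sc_affinity_offset + i) % ncpu, so e.g. 4 queues on an 8-CPU
         	 * system bind TXRX0..TXRX3 to CPU1..CPU4.
         	 */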
   5083 
   5084 	error = wm_alloc_txrx_queues(sc);
   5085 	if (error) {
   5086 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5087 		    error);
   5088 		return ENOMEM;
   5089 	}
   5090 
   5091 	kcpuset_create(&affinity, false);
   5092 	intr_idx = 0;
   5093 
   5094 	/*
   5095 	 * TX and RX
   5096 	 */
   5097 	txrx_established = 0;
   5098 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5099 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5100 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5101 
   5102 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5103 		    sizeof(intrbuf));
   5104 #ifdef WM_MPSAFE
   5105 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5106 		    PCI_INTR_MPSAFE, true);
   5107 #endif
   5108 		memset(intr_xname, 0, sizeof(intr_xname));
   5109 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5110 		    device_xname(sc->sc_dev), qidx);
   5111 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5112 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5113 		if (vih == NULL) {
   5114 			aprint_error_dev(sc->sc_dev,
   5115 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5116 			    intrstr ? " at " : "",
   5117 			    intrstr ? intrstr : "");
   5118 
   5119 			goto fail;
   5120 		}
   5121 		kcpuset_zero(affinity);
   5122 		/* Round-robin affinity */
   5123 		kcpuset_set(affinity, affinity_to);
   5124 		error = interrupt_distribute(vih, affinity, NULL);
   5125 		if (error == 0) {
   5126 			aprint_normal_dev(sc->sc_dev,
   5127 			    "for TX and RX interrupting at %s affinity to %u\n",
   5128 			    intrstr, affinity_to);
   5129 		} else {
   5130 			aprint_normal_dev(sc->sc_dev,
   5131 			    "for TX and RX interrupting at %s\n", intrstr);
   5132 		}
   5133 		sc->sc_ihs[intr_idx] = vih;
   5134 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5135 			goto fail;
   5136 		txrx_established++;
   5137 		intr_idx++;
   5138 	}
   5139 
   5140 	/*
   5141 	 * LINK
   5142 	 */
   5143 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5144 	    sizeof(intrbuf));
   5145 #ifdef WM_MPSAFE
   5146 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5147 #endif
   5148 	memset(intr_xname, 0, sizeof(intr_xname));
   5149 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5150 	    device_xname(sc->sc_dev));
   5151 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5152 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5153 	if (vih == NULL) {
   5154 		aprint_error_dev(sc->sc_dev,
   5155 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5156 		    intrstr ? " at " : "",
   5157 		    intrstr ? intrstr : "");
   5158 
   5159 		goto fail;
   5160 	}
    5161 	/* Keep the default affinity for the LINK interrupt */
   5162 	aprint_normal_dev(sc->sc_dev,
   5163 	    "for LINK interrupting at %s\n", intrstr);
   5164 	sc->sc_ihs[intr_idx] = vih;
   5165 	sc->sc_link_intr_idx = intr_idx;
   5166 
   5167 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5168 	kcpuset_destroy(affinity);
   5169 	return 0;
   5170 
   5171  fail:
   5172 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5173 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5174 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5175 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5176 	}
   5177 
   5178 	kcpuset_destroy(affinity);
   5179 	return ENOMEM;
   5180 }
   5181 
   5182 static void
   5183 wm_unset_stopping_flags(struct wm_softc *sc)
   5184 {
   5185 	int i;
   5186 
   5187 	KASSERT(WM_CORE_LOCKED(sc));
   5188 
   5189 	/*
    5190 	 * Must unset the stopping flags in ascending order.
   5191 	 */
    5192 	for (i = 0; i < sc->sc_nqueues; i++) {
   5193 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5194 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5195 
   5196 		mutex_enter(txq->txq_lock);
   5197 		txq->txq_stopping = false;
   5198 		mutex_exit(txq->txq_lock);
   5199 
   5200 		mutex_enter(rxq->rxq_lock);
   5201 		rxq->rxq_stopping = false;
   5202 		mutex_exit(rxq->rxq_lock);
   5203 	}
   5204 
   5205 	sc->sc_core_stopping = false;
   5206 }
   5207 
   5208 static void
   5209 wm_set_stopping_flags(struct wm_softc *sc)
   5210 {
   5211 	int i;
   5212 
   5213 	KASSERT(WM_CORE_LOCKED(sc));
   5214 
   5215 	sc->sc_core_stopping = true;
   5216 
   5217 	/*
    5218 	 * Must set the stopping flags in ascending order.
   5219 	 */
    5220 	for (i = 0; i < sc->sc_nqueues; i++) {
   5221 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5222 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5223 
   5224 		mutex_enter(rxq->rxq_lock);
   5225 		rxq->rxq_stopping = true;
   5226 		mutex_exit(rxq->rxq_lock);
   5227 
   5228 		mutex_enter(txq->txq_lock);
   5229 		txq->txq_stopping = true;
   5230 		mutex_exit(txq->txq_lock);
   5231 	}
   5232 }
   5233 
   5234 /*
    5235  * Write the interrupt interval value to the ITR or EITR register.
   5236  */
   5237 static void
   5238 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5239 {
   5240 
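         	/*
         	 * On pre-NEWQUEUE chips the ITR counts in units of 256 ns, so a
         	 * register value V throttles to roughly 1,000,000,000 / (V * 256)
         	 * interrupts/sec; see the sc_itr_init comments in wm_init_locked().
         	 */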
   5241 	if (!wmq->wmq_set_itr)
   5242 		return;
   5243 
   5244 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5245 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5246 
   5247 		/*
    5248 		 * The 82575 doesn't have the CNT_INGR field, so overwrite
    5249 		 * the counter field in software.
   5250 		 */
   5251 		if (sc->sc_type == WM_T_82575)
   5252 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5253 		else
   5254 			eitr |= EITR_CNT_INGR;
   5255 
   5256 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5257 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5258 		/*
    5259 		 * The 82574 has both ITR and EITR.  Set EITR when we use
    5260 		 * the multiqueue function with MSI-X.
   5261 		 */
   5262 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5263 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5264 	} else {
   5265 		KASSERT(wmq->wmq_id == 0);
   5266 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5267 	}
   5268 
   5269 	wmq->wmq_set_itr = false;
   5270 }
   5271 
   5272 /*
   5273  * TODO
    5274  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5275  * however, it does not fit wm(4), so AIM is disabled until we find an
    5276  * appropriate ITR calculation.
   5277  */
   5278 /*
    5279  * Calculate the interrupt interval value for wm_itrs_writereg() to
    5280  * write.  This function does not write the ITR/EITR register itself.
   5281  */
   5282 static void
   5283 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5284 {
   5285 #ifdef NOTYET
   5286 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5287 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5288 	uint32_t avg_size = 0;
   5289 	uint32_t new_itr;
   5290 
   5291 	if (rxq->rxq_packets)
   5292 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5293 	if (txq->txq_packets)
   5294 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5295 
   5296 	if (avg_size == 0) {
   5297 		new_itr = 450; /* restore default value */
   5298 		goto out;
   5299 	}
   5300 
   5301 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
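         	/* (preamble + SFD = 8, CRC = 4, minimum inter-frame gap = 12) */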
   5302 	avg_size += 24;
   5303 
   5304 	/* Don't starve jumbo frames */
   5305 	avg_size = min(avg_size, 3000);
   5306 
   5307 	/* Give a little boost to mid-size frames */
   5308 	if ((avg_size > 300) && (avg_size < 1200))
   5309 		new_itr = avg_size / 3;
   5310 	else
   5311 		new_itr = avg_size / 2;
   5312 
   5313 out:
   5314 	/*
    5315 	 * The usage of 82574 and 82575 EITR differs from other NEWQUEUE
    5316 	 * controllers.  See the sc->sc_itr_init setting in wm_init_locked().
   5317 	 */
   5318 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5319 		new_itr *= 4;
   5320 
   5321 	if (new_itr != wmq->wmq_itr) {
   5322 		wmq->wmq_itr = new_itr;
   5323 		wmq->wmq_set_itr = true;
   5324 	} else
   5325 		wmq->wmq_set_itr = false;
   5326 
   5327 	rxq->rxq_packets = 0;
   5328 	rxq->rxq_bytes = 0;
   5329 	txq->txq_packets = 0;
   5330 	txq->txq_bytes = 0;
   5331 #endif
   5332 }
   5333 
   5334 /*
   5335  * wm_init:		[ifnet interface function]
   5336  *
   5337  *	Initialize the interface.
   5338  */
   5339 static int
   5340 wm_init(struct ifnet *ifp)
   5341 {
   5342 	struct wm_softc *sc = ifp->if_softc;
   5343 	int ret;
   5344 
   5345 	WM_CORE_LOCK(sc);
   5346 	ret = wm_init_locked(ifp);
   5347 	WM_CORE_UNLOCK(sc);
   5348 
   5349 	return ret;
   5350 }
   5351 
   5352 static int
   5353 wm_init_locked(struct ifnet *ifp)
   5354 {
   5355 	struct wm_softc *sc = ifp->if_softc;
   5356 	int i, j, trynum, error = 0;
   5357 	uint32_t reg;
   5358 
   5359 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5360 		device_xname(sc->sc_dev), __func__));
   5361 	KASSERT(WM_CORE_LOCKED(sc));
   5362 
   5363 	/*
    5364 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5365 	 * There is a small but measurable benefit to avoiding the adjustment
    5366 	 * of the descriptor so that the headers are aligned, for a normal MTU,
   5367 	 * on such platforms.  One possibility is that the DMA itself is
   5368 	 * slightly more efficient if the front of the entire packet (instead
   5369 	 * of the front of the headers) is aligned.
   5370 	 *
   5371 	 * Note we must always set align_tweak to 0 if we are using
   5372 	 * jumbo frames.
   5373 	 */
   5374 #ifdef __NO_STRICT_ALIGNMENT
   5375 	sc->sc_align_tweak = 0;
   5376 #else
   5377 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5378 		sc->sc_align_tweak = 0;
   5379 	else
   5380 		sc->sc_align_tweak = 2;
   5381 #endif /* __NO_STRICT_ALIGNMENT */
   5382 
   5383 	/* Cancel any pending I/O. */
   5384 	wm_stop_locked(ifp, 0);
   5385 
   5386 	/* update statistics before reset */
   5387 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5388 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5389 
   5390 	/* PCH_SPT hardware workaround */
   5391 	if (sc->sc_type == WM_T_PCH_SPT)
   5392 		wm_flush_desc_rings(sc);
   5393 
   5394 	/* Reset the chip to a known state. */
   5395 	wm_reset(sc);
   5396 
   5397 	/*
   5398 	 * AMT based hardware can now take control from firmware
   5399 	 * Do this after reset.
   5400 	 */
   5401 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5402 		wm_get_hw_control(sc);
   5403 
   5404 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5405 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5406 		wm_legacy_irq_quirk_spt(sc);
   5407 
   5408 	/* Init hardware bits */
   5409 	wm_initialize_hardware_bits(sc);
   5410 
   5411 	/* Reset the PHY. */
   5412 	if (sc->sc_flags & WM_F_HAS_MII)
   5413 		wm_gmii_reset(sc);
   5414 
   5415 	/* Calculate (E)ITR value */
   5416 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5417 		/*
    5418 		 * For NEWQUEUE's EITR (except for the 82575).
    5419 		 * The 82575's EITR should be set to the same throttling value
    5420 		 * as other old controllers' ITR because the interrupts/sec
    5421 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5422 		 *
    5423 		 * The 82574's EITR should be set to the same throttling value as ITR.
    5424 		 *
    5425 		 * For N interrupts/sec, set this value to 1,000,000 / N, in
    5426 		 * contrast to the ITR throttling value.
   5427 		 */
   5428 		sc->sc_itr_init = 450;
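         		/* By the formula above, 450 is roughly 1,000,000 / 450 = 2222 ints/sec. */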
   5429 	} else if (sc->sc_type >= WM_T_82543) {
   5430 		/*
   5431 		 * Set up the interrupt throttling register (units of 256ns)
   5432 		 * Note that a footnote in Intel's documentation says this
   5433 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    5434 		 * or 10Mbit mode.  Empirically, it appears to be the case
    5435 		 * that this is also true for the 1024ns units of the other
   5436 		 * interrupt-related timer registers -- so, really, we ought
   5437 		 * to divide this value by 4 when the link speed is low.
   5438 		 *
   5439 		 * XXX implement this division at link speed change!
   5440 		 */
   5441 
   5442 		/*
   5443 		 * For N interrupts/sec, set this value to:
   5444 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5445 		 * absolute and packet timer values to this value
   5446 		 * divided by 4 to get "simple timer" behavior.
   5447 		 */
   5448 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5449 	}
   5450 
   5451 	error = wm_init_txrx_queues(sc);
   5452 	if (error)
   5453 		goto out;
   5454 
   5455 	/*
   5456 	 * Clear out the VLAN table -- we don't use it (yet).
   5457 	 */
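         	/* Each VFTA entry is a 32-bit register, hence the (i << 2) offset. */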
   5458 	CSR_WRITE(sc, WMREG_VET, 0);
   5459 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5460 		trynum = 10; /* Due to hw errata */
   5461 	else
   5462 		trynum = 1;
   5463 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5464 		for (j = 0; j < trynum; j++)
   5465 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5466 
   5467 	/*
   5468 	 * Set up flow-control parameters.
   5469 	 *
   5470 	 * XXX Values could probably stand some tuning.
   5471 	 */
   5472 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5473 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5474 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5475 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5476 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5477 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5478 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5479 	}
   5480 
   5481 	sc->sc_fcrtl = FCRTL_DFLT;
   5482 	if (sc->sc_type < WM_T_82543) {
   5483 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5484 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5485 	} else {
   5486 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5487 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5488 	}
   5489 
   5490 	if (sc->sc_type == WM_T_80003)
   5491 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5492 	else
   5493 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5494 
   5495 	/* Writes the control register. */
   5496 	wm_set_vlan(sc);
   5497 
   5498 	if (sc->sc_flags & WM_F_HAS_MII) {
   5499 		uint16_t kmreg;
   5500 
   5501 		switch (sc->sc_type) {
   5502 		case WM_T_80003:
   5503 		case WM_T_ICH8:
   5504 		case WM_T_ICH9:
   5505 		case WM_T_ICH10:
   5506 		case WM_T_PCH:
   5507 		case WM_T_PCH2:
   5508 		case WM_T_PCH_LPT:
   5509 		case WM_T_PCH_SPT:
   5510 			/*
   5511 			 * Set the mac to wait the maximum time between each
   5512 			 * iteration and increase the max iterations when
   5513 			 * polling the phy; this fixes erroneous timeouts at
   5514 			 * 10Mbps.
   5515 			 */
   5516 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5517 			    0xFFFF);
   5518 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5519 			    &kmreg);
   5520 			kmreg |= 0x3F;
   5521 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5522 			    kmreg);
   5523 			break;
   5524 		default:
   5525 			break;
   5526 		}
   5527 
   5528 		if (sc->sc_type == WM_T_80003) {
   5529 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5530 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5531 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5532 
   5533 			/* Bypass RX and TX FIFO's */
   5534 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5535 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5536 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5537 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5538 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5539 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5540 		}
   5541 	}
   5542 #if 0
   5543 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5544 #endif
   5545 
   5546 	/* Set up checksum offload parameters. */
   5547 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5548 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5549 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5550 		reg |= RXCSUM_IPOFL;
   5551 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5552 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5553 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5554 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5555 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5556 
   5557 	/* Set registers about MSI-X */
   5558 	if (wm_is_using_msix(sc)) {
   5559 		uint32_t ivar;
   5560 		struct wm_queue *wmq;
   5561 		int qid, qintr_idx;
   5562 
   5563 		if (sc->sc_type == WM_T_82575) {
   5564 			/* Interrupt control */
   5565 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5566 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5567 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5568 
   5569 			/* TX and RX */
   5570 			for (i = 0; i < sc->sc_nqueues; i++) {
   5571 				wmq = &sc->sc_queue[i];
   5572 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5573 				    EITR_TX_QUEUE(wmq->wmq_id)
   5574 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5575 			}
   5576 			/* Link status */
   5577 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5578 			    EITR_OTHER);
   5579 		} else if (sc->sc_type == WM_T_82574) {
   5580 			/* Interrupt control */
   5581 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5582 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5583 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5584 
   5585 			/*
    5586 			 * Work around an issue with spurious interrupts
    5587 			 * in MSI-X mode.
    5588 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5589 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5590 			 */
   5591 			reg = CSR_READ(sc, WMREG_RFCTL);
   5592 			reg |= WMREG_RFCTL_ACKDIS;
   5593 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5594 
   5595 			ivar = 0;
   5596 			/* TX and RX */
   5597 			for (i = 0; i < sc->sc_nqueues; i++) {
   5598 				wmq = &sc->sc_queue[i];
   5599 				qid = wmq->wmq_id;
   5600 				qintr_idx = wmq->wmq_intr_idx;
   5601 
   5602 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5603 				    IVAR_TX_MASK_Q_82574(qid));
   5604 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5605 				    IVAR_RX_MASK_Q_82574(qid));
   5606 			}
   5607 			/* Link status */
   5608 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5609 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5610 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5611 		} else {
   5612 			/* Interrupt control */
   5613 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5614 			    | GPIE_EIAME | GPIE_PBA);
   5615 
   5616 			switch (sc->sc_type) {
   5617 			case WM_T_82580:
   5618 			case WM_T_I350:
   5619 			case WM_T_I354:
   5620 			case WM_T_I210:
   5621 			case WM_T_I211:
   5622 				/* TX and RX */
   5623 				for (i = 0; i < sc->sc_nqueues; i++) {
   5624 					wmq = &sc->sc_queue[i];
   5625 					qid = wmq->wmq_id;
   5626 					qintr_idx = wmq->wmq_intr_idx;
   5627 
   5628 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5629 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5630 					ivar |= __SHIFTIN((qintr_idx
   5631 						| IVAR_VALID),
   5632 					    IVAR_TX_MASK_Q(qid));
   5633 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5634 					ivar |= __SHIFTIN((qintr_idx
   5635 						| IVAR_VALID),
   5636 					    IVAR_RX_MASK_Q(qid));
   5637 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5638 				}
   5639 				break;
   5640 			case WM_T_82576:
   5641 				/* TX and RX */
   5642 				for (i = 0; i < sc->sc_nqueues; i++) {
   5643 					wmq = &sc->sc_queue[i];
   5644 					qid = wmq->wmq_id;
   5645 					qintr_idx = wmq->wmq_intr_idx;
   5646 
   5647 					ivar = CSR_READ(sc,
   5648 					    WMREG_IVAR_Q_82576(qid));
   5649 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5650 					ivar |= __SHIFTIN((qintr_idx
   5651 						| IVAR_VALID),
   5652 					    IVAR_TX_MASK_Q_82576(qid));
   5653 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5654 					ivar |= __SHIFTIN((qintr_idx
   5655 						| IVAR_VALID),
   5656 					    IVAR_RX_MASK_Q_82576(qid));
   5657 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5658 					    ivar);
   5659 				}
   5660 				break;
   5661 			default:
   5662 				break;
   5663 			}
   5664 
   5665 			/* Link status */
   5666 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5667 			    IVAR_MISC_OTHER);
   5668 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5669 		}
   5670 
   5671 		if (wm_is_using_multiqueue(sc)) {
   5672 			wm_init_rss(sc);
   5673 
    5674 			/*
    5675 			 * NOTE: Receive Full-Packet Checksum Offload
    5676 			 * is mutually exclusive with Multiqueue.  However,
    5677 			 * this is not the same as TCP/IP checksums, which
    5678 			 * still work.
    5679 			 */
   5680 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5681 			reg |= RXCSUM_PCSD;
   5682 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5683 		}
   5684 	}
   5685 
   5686 	/* Set up the interrupt registers. */
   5687 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5688 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5689 	    ICR_RXO | ICR_RXT0;
   5690 	if (wm_is_using_msix(sc)) {
   5691 		uint32_t mask;
   5692 		struct wm_queue *wmq;
   5693 
   5694 		switch (sc->sc_type) {
   5695 		case WM_T_82574:
   5696 			mask = 0;
   5697 			for (i = 0; i < sc->sc_nqueues; i++) {
   5698 				wmq = &sc->sc_queue[i];
   5699 				mask |= ICR_TXQ(wmq->wmq_id);
   5700 				mask |= ICR_RXQ(wmq->wmq_id);
   5701 			}
   5702 			mask |= ICR_OTHER;
   5703 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5704 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5705 			break;
   5706 		default:
   5707 			if (sc->sc_type == WM_T_82575) {
   5708 				mask = 0;
   5709 				for (i = 0; i < sc->sc_nqueues; i++) {
   5710 					wmq = &sc->sc_queue[i];
   5711 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5712 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5713 				}
   5714 				mask |= EITR_OTHER;
   5715 			} else {
   5716 				mask = 0;
   5717 				for (i = 0; i < sc->sc_nqueues; i++) {
   5718 					wmq = &sc->sc_queue[i];
   5719 					mask |= 1 << wmq->wmq_intr_idx;
   5720 				}
   5721 				mask |= 1 << sc->sc_link_intr_idx;
   5722 			}
   5723 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5724 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5725 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5726 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5727 			break;
   5728 		}
   5729 	} else
   5730 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5731 
   5732 	/* Set up the inter-packet gap. */
   5733 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5734 
   5735 	if (sc->sc_type >= WM_T_82543) {
   5736 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5737 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5738 			wm_itrs_writereg(sc, wmq);
   5739 		}
   5740 		/*
    5741 		 * Link interrupts occur much less often than TX and RX
    5742 		 * interrupts, so we don't tune the
    5743 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    5744 		 * if_igb does.
   5745 		 */
   5746 	}
   5747 
   5748 	/* Set the VLAN ethernetype. */
   5749 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5750 
   5751 	/*
   5752 	 * Set up the transmit control register; we start out with
    5753 	 * a collision distance suitable for FDX, but update it when
   5754 	 * we resolve the media type.
   5755 	 */
   5756 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5757 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5758 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5759 	if (sc->sc_type >= WM_T_82571)
   5760 		sc->sc_tctl |= TCTL_MULR;
   5761 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5762 
   5763 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5764 		/* Write TDT after TCTL.EN is set. See the document. */
   5765 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5766 	}
   5767 
   5768 	if (sc->sc_type == WM_T_80003) {
   5769 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5770 		reg &= ~TCTL_EXT_GCEX_MASK;
   5771 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5772 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5773 	}
   5774 
   5775 	/* Set the media. */
   5776 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5777 		goto out;
   5778 
   5779 	/* Configure for OS presence */
   5780 	wm_init_manageability(sc);
   5781 
   5782 	/*
   5783 	 * Set up the receive control register; we actually program
   5784 	 * the register when we set the receive filter.  Use multicast
   5785 	 * address offset type 0.
   5786 	 *
   5787 	 * Only the i82544 has the ability to strip the incoming
   5788 	 * CRC, so we don't enable that feature.
   5789 	 */
   5790 	sc->sc_mchash_type = 0;
   5791 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5792 	    | RCTL_MO(sc->sc_mchash_type);
   5793 
   5794 	/*
    5795 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5796 	 */
   5797 	if (sc->sc_type == WM_T_82574)
   5798 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5799 
   5800 	/*
   5801 	 * The I350 has a bug where it always strips the CRC whether
    5802 	 * asked to or not.  So ask for the stripped CRC here and cope in rxeof.
   5803 	 */
   5804 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5805 	    || (sc->sc_type == WM_T_I210))
   5806 		sc->sc_rctl |= RCTL_SECRC;
   5807 
   5808 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5809 	    && (ifp->if_mtu > ETHERMTU)) {
   5810 		sc->sc_rctl |= RCTL_LPE;
   5811 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5812 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5813 	}
   5814 
   5815 	if (MCLBYTES == 2048) {
   5816 		sc->sc_rctl |= RCTL_2k;
   5817 	} else {
   5818 		if (sc->sc_type >= WM_T_82543) {
   5819 			switch (MCLBYTES) {
   5820 			case 4096:
   5821 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5822 				break;
   5823 			case 8192:
   5824 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5825 				break;
   5826 			case 16384:
   5827 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5828 				break;
   5829 			default:
   5830 				panic("wm_init: MCLBYTES %d unsupported",
   5831 				    MCLBYTES);
   5832 				break;
   5833 			}
   5834 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5835 	}
   5836 
   5837 	/* Enable ECC */
   5838 	switch (sc->sc_type) {
   5839 	case WM_T_82571:
   5840 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5841 		reg |= PBA_ECC_CORR_EN;
   5842 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5843 		break;
   5844 	case WM_T_PCH_LPT:
   5845 	case WM_T_PCH_SPT:
   5846 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5847 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5848 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5849 
   5850 		sc->sc_ctrl |= CTRL_MEHE;
   5851 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5852 		break;
   5853 	default:
   5854 		break;
   5855 	}
   5856 
   5857 	/*
   5858 	 * Set the receive filter.
   5859 	 *
   5860 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5861 	 * the setting of RCTL.EN in wm_set_filter()
   5862 	 */
   5863 	wm_set_filter(sc);
   5864 
    5865 	/* On 82575 and later, set RDT only if RX is enabled */
   5866 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5867 		int qidx;
   5868 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5869 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5870 			for (i = 0; i < WM_NRXDESC; i++) {
   5871 				mutex_enter(rxq->rxq_lock);
   5872 				wm_init_rxdesc(rxq, i);
   5873 				mutex_exit(rxq->rxq_lock);
   5874 
   5875 			}
   5876 		}
   5877 	}
   5878 
   5879 	wm_unset_stopping_flags(sc);
   5880 
   5881 	/* Start the one second link check clock. */
   5882 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5883 
   5884 	/* ...all done! */
   5885 	ifp->if_flags |= IFF_RUNNING;
   5886 	ifp->if_flags &= ~IFF_OACTIVE;
   5887 
   5888  out:
   5889 	sc->sc_if_flags = ifp->if_flags;
   5890 	if (error)
   5891 		log(LOG_ERR, "%s: interface not running\n",
   5892 		    device_xname(sc->sc_dev));
   5893 	return error;
   5894 }
   5895 
   5896 /*
   5897  * wm_stop:		[ifnet interface function]
   5898  *
   5899  *	Stop transmission on the interface.
   5900  */
   5901 static void
   5902 wm_stop(struct ifnet *ifp, int disable)
   5903 {
   5904 	struct wm_softc *sc = ifp->if_softc;
   5905 
   5906 	WM_CORE_LOCK(sc);
   5907 	wm_stop_locked(ifp, disable);
   5908 	WM_CORE_UNLOCK(sc);
   5909 }
   5910 
   5911 static void
   5912 wm_stop_locked(struct ifnet *ifp, int disable)
   5913 {
   5914 	struct wm_softc *sc = ifp->if_softc;
   5915 	struct wm_txsoft *txs;
   5916 	int i, qidx;
   5917 
   5918 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5919 		device_xname(sc->sc_dev), __func__));
   5920 	KASSERT(WM_CORE_LOCKED(sc));
   5921 
   5922 	wm_set_stopping_flags(sc);
   5923 
   5924 	/* Stop the one second clock. */
   5925 	callout_stop(&sc->sc_tick_ch);
   5926 
   5927 	/* Stop the 82547 Tx FIFO stall check timer. */
   5928 	if (sc->sc_type == WM_T_82547)
   5929 		callout_stop(&sc->sc_txfifo_ch);
   5930 
   5931 	if (sc->sc_flags & WM_F_HAS_MII) {
   5932 		/* Down the MII. */
   5933 		mii_down(&sc->sc_mii);
   5934 	} else {
   5935 #if 0
   5936 		/* Should we clear PHY's status properly? */
   5937 		wm_reset(sc);
   5938 #endif
   5939 	}
   5940 
   5941 	/* Stop the transmit and receive processes. */
   5942 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5943 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5944 	sc->sc_rctl &= ~RCTL_EN;
   5945 
   5946 	/*
   5947 	 * Clear the interrupt mask to ensure the device cannot assert its
   5948 	 * interrupt line.
   5949 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5950 	 * service any currently pending or shared interrupt.
   5951 	 */
   5952 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5953 	sc->sc_icr = 0;
   5954 	if (wm_is_using_msix(sc)) {
   5955 		if (sc->sc_type != WM_T_82574) {
   5956 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5957 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5958 		} else
   5959 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5960 	}
   5961 
   5962 	/* Release any queued transmit buffers. */
   5963 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5964 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5965 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5966 		mutex_enter(txq->txq_lock);
   5967 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5968 			txs = &txq->txq_soft[i];
   5969 			if (txs->txs_mbuf != NULL) {
   5970 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5971 				m_freem(txs->txs_mbuf);
   5972 				txs->txs_mbuf = NULL;
   5973 			}
   5974 		}
   5975 		mutex_exit(txq->txq_lock);
   5976 	}
   5977 
   5978 	/* Mark the interface as down and cancel the watchdog timer. */
   5979 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5980 	ifp->if_timer = 0;
   5981 
   5982 	if (disable) {
   5983 		for (i = 0; i < sc->sc_nqueues; i++) {
   5984 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5985 			mutex_enter(rxq->rxq_lock);
   5986 			wm_rxdrain(rxq);
   5987 			mutex_exit(rxq->rxq_lock);
   5988 		}
   5989 	}
   5990 
   5991 #if 0 /* notyet */
   5992 	if (sc->sc_type >= WM_T_82544)
   5993 		CSR_WRITE(sc, WMREG_WUC, 0);
   5994 #endif
   5995 }
   5996 
   5997 static void
   5998 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5999 {
   6000 	struct mbuf *m;
   6001 	int i;
   6002 
   6003 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6004 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6005 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6006 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6007 		    m->m_data, m->m_len, m->m_flags);
   6008 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6009 	    i, i == 1 ? "" : "s");
   6010 }
   6011 
   6012 /*
   6013  * wm_82547_txfifo_stall:
   6014  *
   6015  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6016  *	reset the FIFO pointers, and restart packet transmission.
   6017  */
   6018 static void
   6019 wm_82547_txfifo_stall(void *arg)
   6020 {
   6021 	struct wm_softc *sc = arg;
   6022 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6023 
   6024 	mutex_enter(txq->txq_lock);
   6025 
   6026 	if (txq->txq_stopping)
   6027 		goto out;
   6028 
   6029 	if (txq->txq_fifo_stall) {
   6030 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6031 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6032 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6033 			/*
   6034 			 * Packets have drained.  Stop transmitter, reset
   6035 			 * FIFO pointers, restart transmitter, and kick
   6036 			 * the packet queue.
   6037 			 */
   6038 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6039 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6040 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6041 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6042 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6043 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6044 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6045 			CSR_WRITE_FLUSH(sc);
   6046 
   6047 			txq->txq_fifo_head = 0;
   6048 			txq->txq_fifo_stall = 0;
   6049 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6050 		} else {
   6051 			/*
   6052 			 * Still waiting for packets to drain; try again in
   6053 			 * another tick.
   6054 			 */
   6055 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6056 		}
   6057 	}
   6058 
   6059 out:
   6060 	mutex_exit(txq->txq_lock);
   6061 }
   6062 
   6063 /*
   6064  * wm_82547_txfifo_bugchk:
   6065  *
   6066  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6067  *	prevent enqueueing a packet that would wrap around the end
    6068  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6069  *
   6070  *	We do this by checking the amount of space before the end
   6071  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6072  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6073  *	the internal FIFO pointers to the beginning, and restart
   6074  *	transmission on the interface.
   6075  */
   6076 #define	WM_FIFO_HDR		0x10
   6077 #define	WM_82547_PAD_LEN	0x3e0
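         /*
          * A worked example with hypothetical numbers: a 100-byte packet costs
          * len = roundup(100 + WM_FIFO_HDR, WM_FIFO_HDR) = 128 bytes of FIFO
          * space, and it is sent immediately only while
          * len < WM_82547_PAD_LEN + space; otherwise we stall and drain first.
          */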
   6078 static int
   6079 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6080 {
   6081 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6082 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6083 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6084 
   6085 	/* Just return if already stalled. */
   6086 	if (txq->txq_fifo_stall)
   6087 		return 1;
   6088 
   6089 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6090 		/* Stall only occurs in half-duplex mode. */
   6091 		goto send_packet;
   6092 	}
   6093 
   6094 	if (len >= WM_82547_PAD_LEN + space) {
   6095 		txq->txq_fifo_stall = 1;
   6096 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6097 		return 1;
   6098 	}
   6099 
   6100  send_packet:
   6101 	txq->txq_fifo_head += len;
   6102 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6103 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6104 
   6105 	return 0;
   6106 }
   6107 
   6108 static int
   6109 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6110 {
   6111 	int error;
   6112 
   6113 	/*
   6114 	 * Allocate the control data structures, and create and load the
   6115 	 * DMA map for it.
   6116 	 *
   6117 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6118 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6119 	 * both sets within the same 4G segment.
   6120 	 */
   6121 	if (sc->sc_type < WM_T_82544)
   6122 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6123 	else
   6124 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6125 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6126 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6127 	else
   6128 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6129 
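         	/*
         	 * The 0x100000000ULL boundary argument to bus_dmamem_alloc()
         	 * below keeps the allocation from crossing a 4GB boundary,
         	 * satisfying the same-4G-segment note above.
         	 */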
   6130 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6131 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6132 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6133 		aprint_error_dev(sc->sc_dev,
   6134 		    "unable to allocate TX control data, error = %d\n",
   6135 		    error);
   6136 		goto fail_0;
   6137 	}
   6138 
   6139 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6140 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6141 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6142 		aprint_error_dev(sc->sc_dev,
   6143 		    "unable to map TX control data, error = %d\n", error);
   6144 		goto fail_1;
   6145 	}
   6146 
   6147 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6148 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6149 		aprint_error_dev(sc->sc_dev,
   6150 		    "unable to create TX control data DMA map, error = %d\n",
   6151 		    error);
   6152 		goto fail_2;
   6153 	}
   6154 
   6155 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6156 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6157 		aprint_error_dev(sc->sc_dev,
   6158 		    "unable to load TX control data DMA map, error = %d\n",
   6159 		    error);
   6160 		goto fail_3;
   6161 	}
   6162 
   6163 	return 0;
   6164 
   6165  fail_3:
   6166 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6167  fail_2:
   6168 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6169 	    WM_TXDESCS_SIZE(txq));
   6170  fail_1:
   6171 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6172  fail_0:
   6173 	return error;
   6174 }
   6175 
   6176 static void
   6177 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6178 {
   6179 
   6180 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6181 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6182 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6183 	    WM_TXDESCS_SIZE(txq));
   6184 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6185 }
   6186 
   6187 static int
   6188 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6189 {
   6190 	int error;
   6191 	size_t rxq_descs_size;
   6192 
   6193 	/*
   6194 	 * Allocate the control data structures, and create and load the
   6195 	 * DMA map for it.
   6196 	 *
   6197 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6198 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6199 	 * both sets within the same 4G segment.
   6200 	 */
   6201 	rxq->rxq_ndesc = WM_NRXDESC;
   6202 	if (sc->sc_type == WM_T_82574)
   6203 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6204 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6205 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6206 	else
   6207 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6208 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6209 
   6210 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6211 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6212 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6213 		aprint_error_dev(sc->sc_dev,
   6214 		    "unable to allocate RX control data, error = %d\n",
   6215 		    error);
   6216 		goto fail_0;
   6217 	}
   6218 
   6219 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6220 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6221 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6222 		aprint_error_dev(sc->sc_dev,
   6223 		    "unable to map RX control data, error = %d\n", error);
   6224 		goto fail_1;
   6225 	}
   6226 
   6227 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6228 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6229 		aprint_error_dev(sc->sc_dev,
   6230 		    "unable to create RX control data DMA map, error = %d\n",
   6231 		    error);
   6232 		goto fail_2;
   6233 	}
   6234 
   6235 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6236 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6237 		aprint_error_dev(sc->sc_dev,
   6238 		    "unable to load RX control data DMA map, error = %d\n",
   6239 		    error);
   6240 		goto fail_3;
   6241 	}
   6242 
   6243 	return 0;
   6244 
   6245  fail_3:
   6246 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6247  fail_2:
   6248 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6249 	    rxq_descs_size);
   6250  fail_1:
   6251 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6252  fail_0:
   6253 	return error;
   6254 }
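
         /*
          * Editor's aside: the bus_dmamem_alloc() calls above pass
          * 0x100000000ULL as the "boundary" argument, which guarantees that
          * the descriptor ring never crosses a 4 GB boundary -- the property
          * required by the NOTE at the top of this function.  A minimal
          * stand-alone sketch of that property (illustration only, not
          * driver code; assumes size > 0):
          */
         #include <stdbool.h>
         #include <stdint.h>

         static bool
         crosses_4g_boundary(uint64_t addr, uint64_t size)
         {

         	/* True iff [addr, addr + size) straddles a 4 GB multiple. */
         	return (addr >> 32) != ((addr + size - 1) >> 32);
         }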
   6255 
   6256 static void
   6257 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6258 {
   6259 
   6260 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6261 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6262 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6263 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6264 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6265 }
   6266 
   6267 
   6268 static int
   6269 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6270 {
   6271 	int i, error;
   6272 
   6273 	/* Create the transmit buffer DMA maps. */
   6274 	WM_TXQUEUELEN(txq) =
   6275 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6276 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6277 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6278 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6279 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6280 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6281 			aprint_error_dev(sc->sc_dev,
   6282 			    "unable to create Tx DMA map %d, error = %d\n",
   6283 			    i, error);
   6284 			goto fail;
   6285 		}
   6286 	}
   6287 
   6288 	return 0;
   6289 
   6290  fail:
   6291 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6292 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6293 			bus_dmamap_destroy(sc->sc_dmat,
   6294 			    txq->txq_soft[i].txs_dmamap);
   6295 	}
   6296 	return error;
   6297 }
   6298 
   6299 static void
   6300 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6301 {
   6302 	int i;
   6303 
   6304 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6305 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6306 			bus_dmamap_destroy(sc->sc_dmat,
   6307 			    txq->txq_soft[i].txs_dmamap);
   6308 	}
   6309 }
   6310 
   6311 static int
   6312 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6313 {
   6314 	int i, error;
   6315 
   6316 	/* Create the receive buffer DMA maps. */
   6317 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6318 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6319 			    MCLBYTES, 0, 0,
   6320 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6321 			aprint_error_dev(sc->sc_dev,
   6322 			    "unable to create Rx DMA map %d error = %d\n",
   6323 			    i, error);
   6324 			goto fail;
   6325 		}
   6326 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6327 	}
   6328 
   6329 	return 0;
   6330 
   6331  fail:
   6332 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6333 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6334 			bus_dmamap_destroy(sc->sc_dmat,
   6335 			    rxq->rxq_soft[i].rxs_dmamap);
   6336 	}
   6337 	return error;
   6338 }
   6339 
   6340 static void
   6341 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6342 {
   6343 	int i;
   6344 
   6345 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6346 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6347 			bus_dmamap_destroy(sc->sc_dmat,
   6348 			    rxq->rxq_soft[i].rxs_dmamap);
   6349 	}
   6350 }
   6351 
   6352 /*
    6353  * wm_alloc_txrx_queues:
    6354  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6355  */
   6356 static int
   6357 wm_alloc_txrx_queues(struct wm_softc *sc)
   6358 {
   6359 	int i, error, tx_done, rx_done;
   6360 
   6361 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6362 	    KM_SLEEP);
   6363 	if (sc->sc_queue == NULL) {
    6364 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6365 		error = ENOMEM;
   6366 		goto fail_0;
   6367 	}
   6368 
   6369 	/*
   6370 	 * For transmission
   6371 	 */
   6372 	error = 0;
   6373 	tx_done = 0;
   6374 	for (i = 0; i < sc->sc_nqueues; i++) {
   6375 #ifdef WM_EVENT_COUNTERS
   6376 		int j;
   6377 		const char *xname;
   6378 #endif
   6379 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6380 		txq->txq_sc = sc;
   6381 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6382 
   6383 		error = wm_alloc_tx_descs(sc, txq);
   6384 		if (error)
   6385 			break;
   6386 		error = wm_alloc_tx_buffer(sc, txq);
   6387 		if (error) {
   6388 			wm_free_tx_descs(sc, txq);
   6389 			break;
   6390 		}
   6391 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6392 		if (txq->txq_interq == NULL) {
   6393 			wm_free_tx_descs(sc, txq);
   6394 			wm_free_tx_buffer(sc, txq);
   6395 			error = ENOMEM;
   6396 			break;
   6397 		}
   6398 
   6399 #ifdef WM_EVENT_COUNTERS
   6400 		xname = device_xname(sc->sc_dev);
   6401 
   6402 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6403 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6404 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6405 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6406 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6407 
   6408 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6409 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6410 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6411 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6412 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6413 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6414 
   6415 		for (j = 0; j < WM_NTXSEGS; j++) {
   6416 			snprintf(txq->txq_txseg_evcnt_names[j],
   6417 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6418 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6419 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6420 		}
   6421 
   6422 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6423 
   6424 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6425 #endif /* WM_EVENT_COUNTERS */
   6426 
   6427 		tx_done++;
   6428 	}
   6429 	if (error)
   6430 		goto fail_1;
   6431 
   6432 	/*
    6433 	 * For receive
   6434 	 */
   6435 	error = 0;
   6436 	rx_done = 0;
   6437 	for (i = 0; i < sc->sc_nqueues; i++) {
   6438 #ifdef WM_EVENT_COUNTERS
   6439 		const char *xname;
   6440 #endif
   6441 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6442 		rxq->rxq_sc = sc;
   6443 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6444 
   6445 		error = wm_alloc_rx_descs(sc, rxq);
   6446 		if (error)
   6447 			break;
   6448 
   6449 		error = wm_alloc_rx_buffer(sc, rxq);
   6450 		if (error) {
   6451 			wm_free_rx_descs(sc, rxq);
   6452 			break;
   6453 		}
   6454 
   6455 #ifdef WM_EVENT_COUNTERS
   6456 		xname = device_xname(sc->sc_dev);
   6457 
   6458 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6459 
   6460 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6461 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6462 #endif /* WM_EVENT_COUNTERS */
   6463 
   6464 		rx_done++;
   6465 	}
   6466 	if (error)
   6467 		goto fail_2;
   6468 
   6469 	return 0;
   6470 
   6471  fail_2:
   6472 	for (i = 0; i < rx_done; i++) {
   6473 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6474 		wm_free_rx_buffer(sc, rxq);
   6475 		wm_free_rx_descs(sc, rxq);
   6476 		if (rxq->rxq_lock)
   6477 			mutex_obj_free(rxq->rxq_lock);
   6478 	}
   6479  fail_1:
   6480 	for (i = 0; i < tx_done; i++) {
   6481 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6482 		pcq_destroy(txq->txq_interq);
   6483 		wm_free_tx_buffer(sc, txq);
   6484 		wm_free_tx_descs(sc, txq);
   6485 		if (txq->txq_lock)
   6486 			mutex_obj_free(txq->txq_lock);
   6487 	}
   6488 
   6489 	kmem_free(sc->sc_queue,
   6490 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6491  fail_0:
   6492 	return error;
   6493 }
   6494 
   6495 /*
    6496  * wm_free_txrx_queues:
    6497  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6498  */
   6499 static void
   6500 wm_free_txrx_queues(struct wm_softc *sc)
   6501 {
   6502 	int i;
   6503 
   6504 	for (i = 0; i < sc->sc_nqueues; i++) {
   6505 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6506 
   6507 #ifdef WM_EVENT_COUNTERS
   6508 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6509 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6510 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6511 #endif /* WM_EVENT_COUNTERS */
   6512 
   6513 		wm_free_rx_buffer(sc, rxq);
   6514 		wm_free_rx_descs(sc, rxq);
   6515 		if (rxq->rxq_lock)
   6516 			mutex_obj_free(rxq->rxq_lock);
   6517 	}
   6518 
   6519 	for (i = 0; i < sc->sc_nqueues; i++) {
   6520 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6521 		struct mbuf *m;
   6522 #ifdef WM_EVENT_COUNTERS
   6523 		int j;
   6524 
   6525 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6526 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6527 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6528 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6529 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6530 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6531 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6532 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6533 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6534 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6535 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6536 
   6537 		for (j = 0; j < WM_NTXSEGS; j++)
   6538 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6539 
   6540 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6541 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6542 #endif /* WM_EVENT_COUNTERS */
   6543 
   6544 		/* drain txq_interq */
   6545 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6546 			m_freem(m);
   6547 		pcq_destroy(txq->txq_interq);
   6548 
   6549 		wm_free_tx_buffer(sc, txq);
   6550 		wm_free_tx_descs(sc, txq);
   6551 		if (txq->txq_lock)
   6552 			mutex_obj_free(txq->txq_lock);
   6553 	}
   6554 
   6555 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6556 }
   6557 
   6558 static void
   6559 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6560 {
   6561 
   6562 	KASSERT(mutex_owned(txq->txq_lock));
   6563 
   6564 	/* Initialize the transmit descriptor ring. */
   6565 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6566 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6567 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6568 	txq->txq_free = WM_NTXDESC(txq);
   6569 	txq->txq_next = 0;
   6570 }
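
         /*
          * Editor's aside: txq_next and txq_free above walk a circular
          * descriptor ring.  Assuming a power-of-two ring size (as the
          * WM_NTXDESC sizes are), the wrap-around step performed by the
          * driver's WM_NEXTTX() macro is equivalent to the sketch below
          * (illustration only, not driver code):
          */
         static inline int
         ring_next(int idx, int ndesc)
         {

         	/* ndesc is assumed to be a power of two. */
         	return (idx + 1) & (ndesc - 1);
         }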
   6571 
   6572 static void
   6573 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6574     struct wm_txqueue *txq)
   6575 {
   6576 
   6577 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6578 		device_xname(sc->sc_dev), __func__));
   6579 	KASSERT(mutex_owned(txq->txq_lock));
   6580 
   6581 	if (sc->sc_type < WM_T_82543) {
   6582 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6583 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6584 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6585 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6586 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6587 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6588 	} else {
   6589 		int qid = wmq->wmq_id;
   6590 
   6591 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6592 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6593 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6594 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6595 
   6596 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6597 			/*
   6598 			 * Don't write TDT before TCTL.EN is set.
    6599 			 * See the datasheet.
   6600 			 */
   6601 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6602 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6603 			    | TXDCTL_WTHRESH(0));
   6604 		else {
   6605 			/* XXX should update with AIM? */
   6606 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6607 			if (sc->sc_type >= WM_T_82540) {
    6608 				/* Should be the same value as TIDV. */
   6609 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6610 			}
   6611 
   6612 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6613 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6614 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6615 		}
   6616 	}
   6617 }
   6618 
   6619 static void
   6620 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6621 {
   6622 	int i;
   6623 
   6624 	KASSERT(mutex_owned(txq->txq_lock));
   6625 
   6626 	/* Initialize the transmit job descriptors. */
   6627 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6628 		txq->txq_soft[i].txs_mbuf = NULL;
   6629 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6630 	txq->txq_snext = 0;
   6631 	txq->txq_sdirty = 0;
   6632 }
   6633 
   6634 static void
   6635 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6636     struct wm_txqueue *txq)
   6637 {
   6638 
   6639 	KASSERT(mutex_owned(txq->txq_lock));
   6640 
   6641 	/*
   6642 	 * Set up some register offsets that are different between
   6643 	 * the i82542 and the i82543 and later chips.
   6644 	 */
   6645 	if (sc->sc_type < WM_T_82543)
   6646 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6647 	else
   6648 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6649 
   6650 	wm_init_tx_descs(sc, txq);
   6651 	wm_init_tx_regs(sc, wmq, txq);
   6652 	wm_init_tx_buffer(sc, txq);
   6653 }
   6654 
   6655 static void
   6656 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6657     struct wm_rxqueue *rxq)
   6658 {
   6659 
   6660 	KASSERT(mutex_owned(rxq->rxq_lock));
   6661 
   6662 	/*
   6663 	 * Initialize the receive descriptor and receive job
   6664 	 * descriptor rings.
   6665 	 */
   6666 	if (sc->sc_type < WM_T_82543) {
   6667 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6668 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6669 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6670 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6671 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6672 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6673 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6674 
   6675 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6676 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6677 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6678 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6679 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6680 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6681 	} else {
   6682 		int qid = wmq->wmq_id;
   6683 
   6684 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6685 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6686 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6687 
   6688 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6689 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6690 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6691 
    6692 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6693 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6694 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6695 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6696 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6697 			    | RXDCTL_WTHRESH(1));
   6698 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6699 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6700 		} else {
   6701 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6702 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6703 			/* XXX should update with AIM? */
   6704 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6705 			/* MUST be the same value as RDTR. */
   6706 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6707 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6708 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6709 		}
   6710 	}
   6711 }
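
         /*
          * Editor's aside: a worked example of the SRRCTL sizing above.
          * SRRCTL_BSIZEPKT holds the receive buffer size in units of
          * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, and the panic() guard rejects
          * any MCLBYTES that is not a whole multiple of that unit.  Assuming
          * the usual shift of 10 (1 KB units; the authoritative value lives
          * in if_wmreg.h) and the common MCLBYTES of 2048:
          *
          *	2048 & ((1 << 10) - 1) == 0	-> supported, no panic
          *	2048 >> 10 == 2			-> BSIZEPKT is written as 2
          */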
   6712 
   6713 static int
   6714 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6715 {
   6716 	struct wm_rxsoft *rxs;
   6717 	int error, i;
   6718 
   6719 	KASSERT(mutex_owned(rxq->rxq_lock));
   6720 
   6721 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6722 		rxs = &rxq->rxq_soft[i];
   6723 		if (rxs->rxs_mbuf == NULL) {
   6724 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6725 				log(LOG_ERR, "%s: unable to allocate or map "
   6726 				    "rx buffer %d, error = %d\n",
   6727 				    device_xname(sc->sc_dev), i, error);
   6728 				/*
   6729 				 * XXX Should attempt to run with fewer receive
   6730 				 * XXX buffers instead of just failing.
   6731 				 */
   6732 				wm_rxdrain(rxq);
   6733 				return ENOMEM;
   6734 			}
   6735 		} else {
   6736 			/*
   6737 			 * For 82575 and 82576, the RX descriptors must be
   6738 			 * initialized after the setting of RCTL.EN in
   6739 			 * wm_set_filter()
   6740 			 */
   6741 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6742 				wm_init_rxdesc(rxq, i);
   6743 		}
   6744 	}
   6745 	rxq->rxq_ptr = 0;
   6746 	rxq->rxq_discard = 0;
   6747 	WM_RXCHAIN_RESET(rxq);
   6748 
   6749 	return 0;
   6750 }
   6751 
   6752 static int
   6753 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6754     struct wm_rxqueue *rxq)
   6755 {
   6756 
   6757 	KASSERT(mutex_owned(rxq->rxq_lock));
   6758 
   6759 	/*
   6760 	 * Set up some register offsets that are different between
   6761 	 * the i82542 and the i82543 and later chips.
   6762 	 */
   6763 	if (sc->sc_type < WM_T_82543)
   6764 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6765 	else
   6766 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6767 
   6768 	wm_init_rx_regs(sc, wmq, rxq);
   6769 	return wm_init_rx_buffer(sc, rxq);
   6770 }
   6771 
   6772 /*
    6773  * wm_init_txrx_queues:
    6774  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   6775  */
   6776 static int
   6777 wm_init_txrx_queues(struct wm_softc *sc)
   6778 {
   6779 	int i, error = 0;
   6780 
   6781 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6782 		device_xname(sc->sc_dev), __func__));
   6783 
   6784 	for (i = 0; i < sc->sc_nqueues; i++) {
   6785 		struct wm_queue *wmq = &sc->sc_queue[i];
   6786 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6787 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6788 
   6789 		/*
   6790 		 * TODO
    6791 		 * Currently, a constant value is used instead of AIM.
    6792 		 * Furthermore, the interrupt interval of a multiqueue setup,
    6793 		 * which uses polling mode, is lower than the default value.
    6794 		 * More tuning and AIM are required.
   6795 		 */
   6796 		if (wm_is_using_multiqueue(sc))
   6797 			wmq->wmq_itr = 50;
   6798 		else
   6799 			wmq->wmq_itr = sc->sc_itr_init;
   6800 		wmq->wmq_set_itr = true;
   6801 
   6802 		mutex_enter(txq->txq_lock);
   6803 		wm_init_tx_queue(sc, wmq, txq);
   6804 		mutex_exit(txq->txq_lock);
   6805 
   6806 		mutex_enter(rxq->rxq_lock);
   6807 		error = wm_init_rx_queue(sc, wmq, rxq);
   6808 		mutex_exit(rxq->rxq_lock);
   6809 		if (error)
   6810 			break;
   6811 	}
   6812 
   6813 	return error;
   6814 }
   6815 
   6816 /*
   6817  * wm_tx_offload:
   6818  *
   6819  *	Set up TCP/IP checksumming parameters for the
   6820  *	specified packet.
   6821  */
   6822 static int
   6823 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6824     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6825 {
   6826 	struct mbuf *m0 = txs->txs_mbuf;
   6827 	struct livengood_tcpip_ctxdesc *t;
   6828 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6829 	uint32_t ipcse;
   6830 	struct ether_header *eh;
   6831 	int offset, iphl;
   6832 	uint8_t fields;
   6833 
   6834 	/*
   6835 	 * XXX It would be nice if the mbuf pkthdr had offset
   6836 	 * fields for the protocol headers.
   6837 	 */
   6838 
   6839 	eh = mtod(m0, struct ether_header *);
   6840 	switch (htons(eh->ether_type)) {
   6841 	case ETHERTYPE_IP:
   6842 	case ETHERTYPE_IPV6:
   6843 		offset = ETHER_HDR_LEN;
   6844 		break;
   6845 
   6846 	case ETHERTYPE_VLAN:
   6847 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6848 		break;
   6849 
   6850 	default:
   6851 		/*
    6852 		 * This protocol or encapsulation is not supported.
   6853 		 */
   6854 		*fieldsp = 0;
   6855 		*cmdp = 0;
   6856 		return 0;
   6857 	}
   6858 
   6859 	if ((m0->m_pkthdr.csum_flags &
   6860 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6861 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6862 	} else {
   6863 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6864 	}
   6865 	ipcse = offset + iphl - 1;
   6866 
   6867 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6868 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6869 	seg = 0;
   6870 	fields = 0;
   6871 
   6872 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6873 		int hlen = offset + iphl;
   6874 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6875 
   6876 		if (__predict_false(m0->m_len <
   6877 				    (hlen + sizeof(struct tcphdr)))) {
   6878 			/*
   6879 			 * TCP/IP headers are not in the first mbuf; we need
   6880 			 * to do this the slow and painful way.  Let's just
   6881 			 * hope this doesn't happen very often.
   6882 			 */
   6883 			struct tcphdr th;
   6884 
   6885 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6886 
   6887 			m_copydata(m0, hlen, sizeof(th), &th);
   6888 			if (v4) {
   6889 				struct ip ip;
   6890 
   6891 				m_copydata(m0, offset, sizeof(ip), &ip);
   6892 				ip.ip_len = 0;
   6893 				m_copyback(m0,
   6894 				    offset + offsetof(struct ip, ip_len),
   6895 				    sizeof(ip.ip_len), &ip.ip_len);
   6896 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6897 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6898 			} else {
   6899 				struct ip6_hdr ip6;
   6900 
   6901 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6902 				ip6.ip6_plen = 0;
   6903 				m_copyback(m0,
   6904 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6905 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6906 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6907 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6908 			}
   6909 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6910 			    sizeof(th.th_sum), &th.th_sum);
   6911 
   6912 			hlen += th.th_off << 2;
   6913 		} else {
   6914 			/*
   6915 			 * TCP/IP headers are in the first mbuf; we can do
   6916 			 * this the easy way.
   6917 			 */
   6918 			struct tcphdr *th;
   6919 
   6920 			if (v4) {
   6921 				struct ip *ip =
   6922 				    (void *)(mtod(m0, char *) + offset);
   6923 				th = (void *)(mtod(m0, char *) + hlen);
   6924 
   6925 				ip->ip_len = 0;
   6926 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6927 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6928 			} else {
   6929 				struct ip6_hdr *ip6 =
   6930 				    (void *)(mtod(m0, char *) + offset);
   6931 				th = (void *)(mtod(m0, char *) + hlen);
   6932 
   6933 				ip6->ip6_plen = 0;
   6934 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6935 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6936 			}
   6937 			hlen += th->th_off << 2;
   6938 		}
   6939 
   6940 		if (v4) {
   6941 			WM_Q_EVCNT_INCR(txq, txtso);
   6942 			cmdlen |= WTX_TCPIP_CMD_IP;
   6943 		} else {
   6944 			WM_Q_EVCNT_INCR(txq, txtso6);
   6945 			ipcse = 0;
   6946 		}
   6947 		cmd |= WTX_TCPIP_CMD_TSE;
   6948 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6949 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6950 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6951 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6952 	}
   6953 
   6954 	/*
   6955 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6956 	 * offload feature, if we load the context descriptor, we
   6957 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6958 	 */
   6959 
   6960 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6961 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6962 	    WTX_TCPIP_IPCSE(ipcse);
   6963 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6964 		WM_Q_EVCNT_INCR(txq, txipsum);
   6965 		fields |= WTX_IXSM;
   6966 	}
   6967 
   6968 	offset += iphl;
   6969 
   6970 	if (m0->m_pkthdr.csum_flags &
   6971 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6972 		WM_Q_EVCNT_INCR(txq, txtusum);
   6973 		fields |= WTX_TXSM;
   6974 		tucs = WTX_TCPIP_TUCSS(offset) |
   6975 		    WTX_TCPIP_TUCSO(offset +
   6976 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6977 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6978 	} else if ((m0->m_pkthdr.csum_flags &
   6979 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6980 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6981 		fields |= WTX_TXSM;
   6982 		tucs = WTX_TCPIP_TUCSS(offset) |
   6983 		    WTX_TCPIP_TUCSO(offset +
   6984 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6985 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6986 	} else {
   6987 		/* Just initialize it to a valid TCP context. */
   6988 		tucs = WTX_TCPIP_TUCSS(offset) |
   6989 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6990 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6991 	}
   6992 
   6993 	/*
    6994 	 * We don't have to write a context descriptor for every packet,
    6995 	 * except on the 82574: there, a context descriptor must be written
    6996 	 * for every packet when two descriptor queues are used.
    6997 	 * Writing a context descriptor for every packet adds overhead,
    6998 	 * but it does not cause problems.
   6999 	 */
   7000 	/* Fill in the context descriptor. */
   7001 	t = (struct livengood_tcpip_ctxdesc *)
   7002 	    &txq->txq_descs[txq->txq_next];
   7003 	t->tcpip_ipcs = htole32(ipcs);
   7004 	t->tcpip_tucs = htole32(tucs);
   7005 	t->tcpip_cmdlen = htole32(cmdlen);
   7006 	t->tcpip_seg = htole32(seg);
   7007 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7008 
   7009 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7010 	txs->txs_ndesc++;
   7011 
   7012 	*cmdp = cmd;
   7013 	*fieldsp = fields;
   7014 
   7015 	return 0;
   7016 }
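
         /*
          * Editor's aside: for the TSO paths above, the total-length field
          * is zeroed and th_sum is pre-seeded with a checksum over only the
          * pseudo-header addresses and protocol, so the controller can add
          * the per-segment length and payload sum itself.  A stand-alone
          * sketch of that seeding for IPv4 (illustration only, not driver
          * code; the driver uses in_cksum_phdr(), and byte-order details
          * are glossed over here):
          */
         #include <stdint.h>

         static uint16_t
         pseudo_hdr_cksum_seed(uint32_t src, uint32_t dst, uint16_t proto_be)
         {
         	uint32_t sum;

         	/* Sum the 16-bit halves of src/dst plus the protocol word. */
         	sum  = (src >> 16) + (src & 0xffff);
         	sum += (dst >> 16) + (dst & 0xffff);
         	sum += proto_be;

         	/* Fold the carries; the seed is stored uncomplemented. */
         	while (sum > 0xffff)
         		sum = (sum >> 16) + (sum & 0xffff);
         	return (uint16_t)sum;
         }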
   7017 
   7018 static inline int
   7019 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7020 {
   7021 	struct wm_softc *sc = ifp->if_softc;
   7022 	u_int cpuid = cpu_index(curcpu());
   7023 
   7024 	/*
    7025 	 * Currently, a simple distribution strategy.
    7026 	 * TODO:
    7027 	 * Distribute by flowid (RSS hash value).
    7028 	 */
    7029 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7030 }
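
         /*
          * Editor's aside: the TODO above suggests distributing packets by
          * their RSS hash instead of by CPU index.  A hypothetical selector,
          * assuming some per-packet hash value `rsshash` were available
          * (no such field is used by this driver today; illustration only):
          */
         #include <stdint.h>

         static inline int
         select_txqueue_by_hash(uint32_t rsshash, int nqueues)
         {

         	/* Equal hashes (one flow) always land on the same queue. */
         	return (int)(rsshash % (uint32_t)nqueues);
         }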
   7031 
   7032 /*
   7033  * wm_start:		[ifnet interface function]
   7034  *
   7035  *	Start packet transmission on the interface.
   7036  */
   7037 static void
   7038 wm_start(struct ifnet *ifp)
   7039 {
   7040 	struct wm_softc *sc = ifp->if_softc;
   7041 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7042 
   7043 #ifdef WM_MPSAFE
   7044 	KASSERT(if_is_mpsafe(ifp));
   7045 #endif
   7046 	/*
   7047 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7048 	 */
   7049 
   7050 	mutex_enter(txq->txq_lock);
   7051 	if (!txq->txq_stopping)
   7052 		wm_start_locked(ifp);
   7053 	mutex_exit(txq->txq_lock);
   7054 }
   7055 
   7056 static void
   7057 wm_start_locked(struct ifnet *ifp)
   7058 {
   7059 	struct wm_softc *sc = ifp->if_softc;
   7060 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7061 
   7062 	wm_send_common_locked(ifp, txq, false);
   7063 }
   7064 
   7065 static int
   7066 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7067 {
   7068 	int qid;
   7069 	struct wm_softc *sc = ifp->if_softc;
   7070 	struct wm_txqueue *txq;
   7071 
   7072 	qid = wm_select_txqueue(ifp, m);
   7073 	txq = &sc->sc_queue[qid].wmq_txq;
   7074 
   7075 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7076 		m_freem(m);
   7077 		WM_Q_EVCNT_INCR(txq, txdrop);
   7078 		return ENOBUFS;
   7079 	}
   7080 
   7081 	/*
   7082 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7083 	 */
   7084 	ifp->if_obytes += m->m_pkthdr.len;
   7085 	if (m->m_flags & M_MCAST)
   7086 		ifp->if_omcasts++;
   7087 
   7088 	if (mutex_tryenter(txq->txq_lock)) {
   7089 		if (!txq->txq_stopping)
   7090 			wm_transmit_locked(ifp, txq);
   7091 		mutex_exit(txq->txq_lock);
   7092 	}
   7093 
   7094 	return 0;
   7095 }
   7096 
   7097 static void
   7098 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7099 {
   7100 
   7101 	wm_send_common_locked(ifp, txq, true);
   7102 }
   7103 
   7104 static void
   7105 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7106     bool is_transmit)
   7107 {
   7108 	struct wm_softc *sc = ifp->if_softc;
   7109 	struct mbuf *m0;
   7110 	struct wm_txsoft *txs;
   7111 	bus_dmamap_t dmamap;
   7112 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7113 	bus_addr_t curaddr;
   7114 	bus_size_t seglen, curlen;
   7115 	uint32_t cksumcmd;
   7116 	uint8_t cksumfields;
   7117 
   7118 	KASSERT(mutex_owned(txq->txq_lock));
   7119 
   7120 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7121 		return;
   7122 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7123 		return;
   7124 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7125 		return;
   7126 
   7127 	/* Remember the previous number of free descriptors. */
   7128 	ofree = txq->txq_free;
   7129 
   7130 	/*
   7131 	 * Loop through the send queue, setting up transmit descriptors
   7132 	 * until we drain the queue, or use up all available transmit
   7133 	 * descriptors.
   7134 	 */
   7135 	for (;;) {
   7136 		m0 = NULL;
   7137 
   7138 		/* Get a work queue entry. */
   7139 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7140 			wm_txeof(sc, txq);
   7141 			if (txq->txq_sfree == 0) {
   7142 				DPRINTF(WM_DEBUG_TX,
   7143 				    ("%s: TX: no free job descriptors\n",
   7144 					device_xname(sc->sc_dev)));
   7145 				WM_Q_EVCNT_INCR(txq, txsstall);
   7146 				break;
   7147 			}
   7148 		}
   7149 
   7150 		/* Grab a packet off the queue. */
   7151 		if (is_transmit)
   7152 			m0 = pcq_get(txq->txq_interq);
   7153 		else
   7154 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7155 		if (m0 == NULL)
   7156 			break;
   7157 
   7158 		DPRINTF(WM_DEBUG_TX,
   7159 		    ("%s: TX: have packet to transmit: %p\n",
   7160 		    device_xname(sc->sc_dev), m0));
   7161 
   7162 		txs = &txq->txq_soft[txq->txq_snext];
   7163 		dmamap = txs->txs_dmamap;
   7164 
   7165 		use_tso = (m0->m_pkthdr.csum_flags &
   7166 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7167 
   7168 		/*
   7169 		 * So says the Linux driver:
   7170 		 * The controller does a simple calculation to make sure
   7171 		 * there is enough room in the FIFO before initiating the
   7172 		 * DMA for each buffer.  The calc is:
   7173 		 *	4 = ceil(buffer len / MSS)
   7174 		 * To make sure we don't overrun the FIFO, adjust the max
   7175 		 * buffer len if the MSS drops.
   7176 		 */
   7177 		dmamap->dm_maxsegsz =
   7178 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7179 		    ? m0->m_pkthdr.segsz << 2
   7180 		    : WTX_MAX_LEN;
   7181 
   7182 		/*
   7183 		 * Load the DMA map.  If this fails, the packet either
   7184 		 * didn't fit in the allotted number of segments, or we
   7185 		 * were short on resources.  For the too-many-segments
   7186 		 * case, we simply report an error and drop the packet,
   7187 		 * since we can't sanely copy a jumbo packet to a single
   7188 		 * buffer.
   7189 		 */
   7190 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7191 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7192 		if (error) {
   7193 			if (error == EFBIG) {
   7194 				WM_Q_EVCNT_INCR(txq, txdrop);
   7195 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7196 				    "DMA segments, dropping...\n",
   7197 				    device_xname(sc->sc_dev));
   7198 				wm_dump_mbuf_chain(sc, m0);
   7199 				m_freem(m0);
   7200 				continue;
   7201 			}
   7202 			/*  Short on resources, just stop for now. */
   7203 			DPRINTF(WM_DEBUG_TX,
   7204 			    ("%s: TX: dmamap load failed: %d\n",
   7205 			    device_xname(sc->sc_dev), error));
   7206 			break;
   7207 		}
   7208 
   7209 		segs_needed = dmamap->dm_nsegs;
   7210 		if (use_tso) {
   7211 			/* For sentinel descriptor; see below. */
   7212 			segs_needed++;
   7213 		}
   7214 
   7215 		/*
   7216 		 * Ensure we have enough descriptors free to describe
   7217 		 * the packet.  Note, we always reserve one descriptor
   7218 		 * at the end of the ring due to the semantics of the
   7219 		 * TDT register, plus one more in the event we need
   7220 		 * to load offload context.
   7221 		 */
   7222 		if (segs_needed > txq->txq_free - 2) {
   7223 			/*
   7224 			 * Not enough free descriptors to transmit this
   7225 			 * packet.  We haven't committed anything yet,
   7226 			 * so just unload the DMA map, put the packet
    7227 			 * back on the queue, and punt.  Notify the upper
   7228 			 * layer that there are no more slots left.
   7229 			 */
   7230 			DPRINTF(WM_DEBUG_TX,
   7231 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7232 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7233 			    segs_needed, txq->txq_free - 1));
   7234 			if (!is_transmit)
   7235 				ifp->if_flags |= IFF_OACTIVE;
   7236 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7237 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7238 			WM_Q_EVCNT_INCR(txq, txdstall);
   7239 			break;
   7240 		}
   7241 
   7242 		/*
   7243 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7244 		 * once we know we can transmit the packet, since we
   7245 		 * do some internal FIFO space accounting here.
   7246 		 */
   7247 		if (sc->sc_type == WM_T_82547 &&
   7248 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7249 			DPRINTF(WM_DEBUG_TX,
   7250 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7251 			    device_xname(sc->sc_dev)));
   7252 			if (!is_transmit)
   7253 				ifp->if_flags |= IFF_OACTIVE;
   7254 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7255 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7256 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7257 			break;
   7258 		}
   7259 
   7260 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7261 
   7262 		DPRINTF(WM_DEBUG_TX,
   7263 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7264 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7265 
   7266 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7267 
   7268 		/*
   7269 		 * Store a pointer to the packet so that we can free it
   7270 		 * later.
   7271 		 *
   7272 		 * Initially, we consider the number of descriptors the
   7273 		 * packet uses the number of DMA segments.  This may be
   7274 		 * incremented by 1 if we do checksum offload (a descriptor
   7275 		 * is used to set the checksum context).
   7276 		 */
   7277 		txs->txs_mbuf = m0;
   7278 		txs->txs_firstdesc = txq->txq_next;
   7279 		txs->txs_ndesc = segs_needed;
   7280 
   7281 		/* Set up offload parameters for this packet. */
   7282 		if (m0->m_pkthdr.csum_flags &
   7283 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7284 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7285 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7286 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7287 					  &cksumfields) != 0) {
   7288 				/* Error message already displayed. */
   7289 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7290 				continue;
   7291 			}
   7292 		} else {
   7293 			cksumcmd = 0;
   7294 			cksumfields = 0;
   7295 		}
   7296 
   7297 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7298 
   7299 		/* Sync the DMA map. */
   7300 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7301 		    BUS_DMASYNC_PREWRITE);
   7302 
   7303 		/* Initialize the transmit descriptor. */
   7304 		for (nexttx = txq->txq_next, seg = 0;
   7305 		     seg < dmamap->dm_nsegs; seg++) {
   7306 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7307 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7308 			     seglen != 0;
   7309 			     curaddr += curlen, seglen -= curlen,
   7310 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7311 				curlen = seglen;
   7312 
   7313 				/*
   7314 				 * So says the Linux driver:
   7315 				 * Work around for premature descriptor
   7316 				 * write-backs in TSO mode.  Append a
   7317 				 * 4-byte sentinel descriptor.
   7318 				 */
   7319 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7320 				    curlen > 8)
   7321 					curlen -= 4;
   7322 
   7323 				wm_set_dma_addr(
   7324 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7325 				txq->txq_descs[nexttx].wtx_cmdlen
   7326 				    = htole32(cksumcmd | curlen);
   7327 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7328 				    = 0;
   7329 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7330 				    = cksumfields;
   7331 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7332 				lasttx = nexttx;
   7333 
   7334 				DPRINTF(WM_DEBUG_TX,
   7335 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7336 				     "len %#04zx\n",
   7337 				    device_xname(sc->sc_dev), nexttx,
   7338 				    (uint64_t)curaddr, curlen));
   7339 			}
   7340 		}
   7341 
   7342 		KASSERT(lasttx != -1);
   7343 
   7344 		/*
   7345 		 * Set up the command byte on the last descriptor of
   7346 		 * the packet.  If we're in the interrupt delay window,
   7347 		 * delay the interrupt.
   7348 		 */
   7349 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7350 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7351 
   7352 		/*
   7353 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7354 		 * up the descriptor to encapsulate the packet for us.
   7355 		 *
   7356 		 * This is only valid on the last descriptor of the packet.
   7357 		 */
   7358 		if (vlan_has_tag(m0)) {
   7359 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7360 			    htole32(WTX_CMD_VLE);
   7361 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7362 			    = htole16(vlan_get_tag(m0));
   7363 		}
   7364 
   7365 		txs->txs_lastdesc = lasttx;
   7366 
   7367 		DPRINTF(WM_DEBUG_TX,
   7368 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7369 		    device_xname(sc->sc_dev),
   7370 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7371 
   7372 		/* Sync the descriptors we're using. */
   7373 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7374 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7375 
   7376 		/* Give the packet to the chip. */
   7377 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7378 
   7379 		DPRINTF(WM_DEBUG_TX,
   7380 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7381 
   7382 		DPRINTF(WM_DEBUG_TX,
   7383 		    ("%s: TX: finished transmitting packet, job %d\n",
   7384 		    device_xname(sc->sc_dev), txq->txq_snext));
   7385 
   7386 		/* Advance the tx pointer. */
   7387 		txq->txq_free -= txs->txs_ndesc;
   7388 		txq->txq_next = nexttx;
   7389 
   7390 		txq->txq_sfree--;
   7391 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7392 
   7393 		/* Pass the packet to any BPF listeners. */
   7394 		bpf_mtap(ifp, m0);
   7395 	}
   7396 
   7397 	if (m0 != NULL) {
   7398 		if (!is_transmit)
   7399 			ifp->if_flags |= IFF_OACTIVE;
   7400 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7401 		WM_Q_EVCNT_INCR(txq, txdrop);
   7402 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7403 			__func__));
   7404 		m_freem(m0);
   7405 	}
   7406 
   7407 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7408 		/* No more slots; notify upper layer. */
   7409 		if (!is_transmit)
   7410 			ifp->if_flags |= IFF_OACTIVE;
   7411 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7412 	}
   7413 
   7414 	if (txq->txq_free != ofree) {
   7415 		/* Set a watchdog timer in case the chip flakes out. */
   7416 		ifp->if_timer = 5;
   7417 	}
   7418 }
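
         /*
          * Editor's aside: a restatement of the "txq_free - 2" space check
          * above.  One descriptor is always kept unused because of the TDT
          * register semantics (TDT catching up with TDH would read back as
          * an empty ring), and one more is reserved in case a checksum
          * context descriptor must be loaded.  Illustration only, not
          * driver code:
          */
         static inline int
         tx_ring_has_room(int txq_free, int segs_needed)
         {

         	/* One slot for TDT semantics + one for a context descriptor. */
         	return segs_needed <= txq_free - 2;
         }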
   7419 
   7420 /*
   7421  * wm_nq_tx_offload:
   7422  *
   7423  *	Set up TCP/IP checksumming parameters for the
   7424  *	specified packet, for NEWQUEUE devices
   7425  */
   7426 static int
   7427 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7428     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7429 {
   7430 	struct mbuf *m0 = txs->txs_mbuf;
   7431 	uint32_t vl_len, mssidx, cmdc;
   7432 	struct ether_header *eh;
   7433 	int offset, iphl;
   7434 
   7435 	/*
   7436 	 * XXX It would be nice if the mbuf pkthdr had offset
   7437 	 * fields for the protocol headers.
   7438 	 */
   7439 	*cmdlenp = 0;
   7440 	*fieldsp = 0;
   7441 
   7442 	eh = mtod(m0, struct ether_header *);
   7443 	switch (htons(eh->ether_type)) {
   7444 	case ETHERTYPE_IP:
   7445 	case ETHERTYPE_IPV6:
   7446 		offset = ETHER_HDR_LEN;
   7447 		break;
   7448 
   7449 	case ETHERTYPE_VLAN:
   7450 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7451 		break;
   7452 
   7453 	default:
    7454 		/* This protocol or encapsulation is not supported. */
   7455 		*do_csum = false;
   7456 		return 0;
   7457 	}
   7458 	*do_csum = true;
   7459 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7460 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7461 
   7462 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7463 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7464 
   7465 	if ((m0->m_pkthdr.csum_flags &
   7466 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7467 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7468 	} else {
   7469 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7470 	}
   7471 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7472 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7473 
   7474 	if (vlan_has_tag(m0)) {
   7475 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7476 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7477 		*cmdlenp |= NQTX_CMD_VLE;
   7478 	}
   7479 
   7480 	mssidx = 0;
   7481 
   7482 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7483 		int hlen = offset + iphl;
   7484 		int tcp_hlen;
   7485 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7486 
   7487 		if (__predict_false(m0->m_len <
   7488 				    (hlen + sizeof(struct tcphdr)))) {
   7489 			/*
   7490 			 * TCP/IP headers are not in the first mbuf; we need
   7491 			 * to do this the slow and painful way.  Let's just
   7492 			 * hope this doesn't happen very often.
   7493 			 */
   7494 			struct tcphdr th;
   7495 
   7496 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7497 
   7498 			m_copydata(m0, hlen, sizeof(th), &th);
   7499 			if (v4) {
   7500 				struct ip ip;
   7501 
   7502 				m_copydata(m0, offset, sizeof(ip), &ip);
   7503 				ip.ip_len = 0;
   7504 				m_copyback(m0,
   7505 				    offset + offsetof(struct ip, ip_len),
   7506 				    sizeof(ip.ip_len), &ip.ip_len);
   7507 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7508 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7509 			} else {
   7510 				struct ip6_hdr ip6;
   7511 
   7512 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7513 				ip6.ip6_plen = 0;
   7514 				m_copyback(m0,
   7515 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7516 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7517 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7518 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7519 			}
   7520 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7521 			    sizeof(th.th_sum), &th.th_sum);
   7522 
   7523 			tcp_hlen = th.th_off << 2;
   7524 		} else {
   7525 			/*
   7526 			 * TCP/IP headers are in the first mbuf; we can do
   7527 			 * this the easy way.
   7528 			 */
   7529 			struct tcphdr *th;
   7530 
   7531 			if (v4) {
   7532 				struct ip *ip =
   7533 				    (void *)(mtod(m0, char *) + offset);
   7534 				th = (void *)(mtod(m0, char *) + hlen);
   7535 
   7536 				ip->ip_len = 0;
   7537 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7538 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7539 			} else {
   7540 				struct ip6_hdr *ip6 =
   7541 				    (void *)(mtod(m0, char *) + offset);
   7542 				th = (void *)(mtod(m0, char *) + hlen);
   7543 
   7544 				ip6->ip6_plen = 0;
   7545 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7546 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7547 			}
   7548 			tcp_hlen = th->th_off << 2;
   7549 		}
   7550 		hlen += tcp_hlen;
   7551 		*cmdlenp |= NQTX_CMD_TSE;
   7552 
   7553 		if (v4) {
   7554 			WM_Q_EVCNT_INCR(txq, txtso);
   7555 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7556 		} else {
   7557 			WM_Q_EVCNT_INCR(txq, txtso6);
   7558 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7559 		}
   7560 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7561 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7562 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7563 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7564 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7565 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7566 	} else {
   7567 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7568 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7569 	}
   7570 
   7571 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7572 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7573 		cmdc |= NQTXC_CMD_IP4;
   7574 	}
   7575 
   7576 	if (m0->m_pkthdr.csum_flags &
   7577 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7578 		WM_Q_EVCNT_INCR(txq, txtusum);
   7579 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7580 			cmdc |= NQTXC_CMD_TCP;
   7581 		} else {
   7582 			cmdc |= NQTXC_CMD_UDP;
   7583 		}
   7584 		cmdc |= NQTXC_CMD_IP4;
   7585 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7586 	}
   7587 	if (m0->m_pkthdr.csum_flags &
   7588 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7589 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7590 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7591 			cmdc |= NQTXC_CMD_TCP;
   7592 		} else {
   7593 			cmdc |= NQTXC_CMD_UDP;
   7594 		}
   7595 		cmdc |= NQTXC_CMD_IP6;
   7596 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7597 	}
   7598 
   7599 	/*
    7600 	 * We don't have to write a context descriptor for every packet on
    7601 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    7602 	 * I354, I210 and I211.  For these controllers it is enough to
    7603 	 * write one context descriptor per Tx queue.
    7604 	 * Writing a context descriptor for every packet adds overhead,
    7605 	 * but it does not cause problems.
   7606 	 */
   7607 	/* Fill in the context descriptor. */
   7608 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7609 	    htole32(vl_len);
   7610 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7611 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7612 	    htole32(cmdc);
   7613 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7614 	    htole32(mssidx);
   7615 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7616 	DPRINTF(WM_DEBUG_TX,
   7617 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7618 	    txq->txq_next, 0, vl_len));
   7619 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7620 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7621 	txs->txs_ndesc++;
   7622 	return 0;
   7623 }
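
         /*
          * Editor's aside: a small worked example of the vl_len packing in
          * wm_nq_tx_offload() above, for the common case of an untagged
          * Ethernet + IPv4 frame (MACLEN = ETHER_HDR_LEN = 14, IPLEN = 20).
          * The shift values below are illustrative assumptions only; the
          * authoritative NQTXC_VLLEN_* definitions live in if_wmreg.h.
          */
         #include <stdint.h>

         #define EX_VLLEN_IPLEN_SHIFT	0	/* assumed: IPLEN in the low bits */
         #define EX_VLLEN_MACLEN_SHIFT	9	/* assumed: MACLEN above IPLEN */

         /* (14 << 9) | (20 << 0) == 0x1c14 */
         static const uint32_t ex_vl_len =
             (14 << EX_VLLEN_MACLEN_SHIFT) | (20 << EX_VLLEN_IPLEN_SHIFT);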
   7624 
   7625 /*
   7626  * wm_nq_start:		[ifnet interface function]
   7627  *
   7628  *	Start packet transmission on the interface for NEWQUEUE devices
   7629  */
   7630 static void
   7631 wm_nq_start(struct ifnet *ifp)
   7632 {
   7633 	struct wm_softc *sc = ifp->if_softc;
   7634 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7635 
   7636 #ifdef WM_MPSAFE
   7637 	KASSERT(if_is_mpsafe(ifp));
   7638 #endif
   7639 	/*
   7640 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7641 	 */
   7642 
   7643 	mutex_enter(txq->txq_lock);
   7644 	if (!txq->txq_stopping)
   7645 		wm_nq_start_locked(ifp);
   7646 	mutex_exit(txq->txq_lock);
   7647 }
   7648 
   7649 static void
   7650 wm_nq_start_locked(struct ifnet *ifp)
   7651 {
   7652 	struct wm_softc *sc = ifp->if_softc;
   7653 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7654 
   7655 	wm_nq_send_common_locked(ifp, txq, false);
   7656 }
   7657 
   7658 static int
   7659 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7660 {
   7661 	int qid;
   7662 	struct wm_softc *sc = ifp->if_softc;
   7663 	struct wm_txqueue *txq;
   7664 
   7665 	qid = wm_select_txqueue(ifp, m);
   7666 	txq = &sc->sc_queue[qid].wmq_txq;
   7667 
   7668 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7669 		m_freem(m);
   7670 		WM_Q_EVCNT_INCR(txq, txdrop);
   7671 		return ENOBUFS;
   7672 	}
   7673 
   7674 	/*
   7675 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7676 	 */
   7677 	ifp->if_obytes += m->m_pkthdr.len;
   7678 	if (m->m_flags & M_MCAST)
   7679 		ifp->if_omcasts++;
   7680 
   7681 	/*
    7682 	 * There are two situations in which this mutex_tryenter() can
    7683 	 * fail at run time:
    7684 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7685 	 *     (2) contention with the deferred if_start softint
    7686 	 *         (wm_handle_queue())
    7687 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7688 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7689 	 * The same holds in case (2), so the packet does not get stuck either.
   7690 	 */
   7691 	if (mutex_tryenter(txq->txq_lock)) {
   7692 		if (!txq->txq_stopping)
   7693 			wm_nq_transmit_locked(ifp, txq);
   7694 		mutex_exit(txq->txq_lock);
   7695 	}
   7696 
   7697 	return 0;
   7698 }
   7699 
   7700 static void
   7701 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7702 {
   7703 
   7704 	wm_nq_send_common_locked(ifp, txq, true);
   7705 }
   7706 
   7707 static void
   7708 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7709     bool is_transmit)
   7710 {
   7711 	struct wm_softc *sc = ifp->if_softc;
   7712 	struct mbuf *m0;
   7713 	struct wm_txsoft *txs;
   7714 	bus_dmamap_t dmamap;
   7715 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7716 	bool do_csum, sent;
   7717 
   7718 	KASSERT(mutex_owned(txq->txq_lock));
   7719 
   7720 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7721 		return;
   7722 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7723 		return;
   7724 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7725 		return;
   7726 
   7727 	sent = false;
   7728 
   7729 	/*
   7730 	 * Loop through the send queue, setting up transmit descriptors
   7731 	 * until we drain the queue, or use up all available transmit
   7732 	 * descriptors.
   7733 	 */
   7734 	for (;;) {
   7735 		m0 = NULL;
   7736 
   7737 		/* Get a work queue entry. */
   7738 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7739 			wm_txeof(sc, txq);
   7740 			if (txq->txq_sfree == 0) {
   7741 				DPRINTF(WM_DEBUG_TX,
   7742 				    ("%s: TX: no free job descriptors\n",
   7743 					device_xname(sc->sc_dev)));
   7744 				WM_Q_EVCNT_INCR(txq, txsstall);
   7745 				break;
   7746 			}
   7747 		}
   7748 
   7749 		/* Grab a packet off the queue. */
   7750 		if (is_transmit)
   7751 			m0 = pcq_get(txq->txq_interq);
   7752 		else
   7753 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7754 		if (m0 == NULL)
   7755 			break;
   7756 
   7757 		DPRINTF(WM_DEBUG_TX,
   7758 		    ("%s: TX: have packet to transmit: %p\n",
   7759 		    device_xname(sc->sc_dev), m0));
   7760 
   7761 		txs = &txq->txq_soft[txq->txq_snext];
   7762 		dmamap = txs->txs_dmamap;
   7763 
   7764 		/*
   7765 		 * Load the DMA map.  If this fails, the packet either
   7766 		 * didn't fit in the allotted number of segments, or we
   7767 		 * were short on resources.  For the too-many-segments
   7768 		 * case, we simply report an error and drop the packet,
   7769 		 * since we can't sanely copy a jumbo packet to a single
   7770 		 * buffer.
   7771 		 */
   7772 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7773 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7774 		if (error) {
   7775 			if (error == EFBIG) {
   7776 				WM_Q_EVCNT_INCR(txq, txdrop);
   7777 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7778 				    "DMA segments, dropping...\n",
   7779 				    device_xname(sc->sc_dev));
   7780 				wm_dump_mbuf_chain(sc, m0);
   7781 				m_freem(m0);
   7782 				continue;
   7783 			}
   7784 			/* Short on resources, just stop for now. */
   7785 			DPRINTF(WM_DEBUG_TX,
   7786 			    ("%s: TX: dmamap load failed: %d\n",
   7787 			    device_xname(sc->sc_dev), error));
   7788 			break;
   7789 		}
   7790 
   7791 		segs_needed = dmamap->dm_nsegs;
   7792 
   7793 		/*
   7794 		 * Ensure we have enough descriptors free to describe
   7795 		 * the packet.  Note, we always reserve one descriptor
   7796 		 * at the end of the ring due to the semantics of the
   7797 		 * TDT register, plus one more in the event we need
   7798 		 * to load offload context.
   7799 		 */
   7800 		if (segs_needed > txq->txq_free - 2) {
   7801 			/*
   7802 			 * Not enough free descriptors to transmit this
   7803 			 * packet.  We haven't committed anything yet,
   7804 			 * so just unload the DMA map, put the packet
    7805 			 * back on the queue, and punt.  Notify the upper
   7806 			 * layer that there are no more slots left.
   7807 			 */
   7808 			DPRINTF(WM_DEBUG_TX,
   7809 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7810 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7811 			    segs_needed, txq->txq_free - 1));
   7812 			if (!is_transmit)
   7813 				ifp->if_flags |= IFF_OACTIVE;
   7814 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7815 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7816 			WM_Q_EVCNT_INCR(txq, txdstall);
   7817 			break;
   7818 		}
   7819 
   7820 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7821 
   7822 		DPRINTF(WM_DEBUG_TX,
   7823 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7824 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7825 
   7826 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7827 
   7828 		/*
   7829 		 * Store a pointer to the packet so that we can free it
   7830 		 * later.
   7831 		 *
   7832 		 * Initially, we consider the number of descriptors the
   7833 		 * packet uses the number of DMA segments.  This may be
   7834 		 * incremented by 1 if we do checksum offload (a descriptor
   7835 		 * is used to set the checksum context).
   7836 		 */
   7837 		txs->txs_mbuf = m0;
   7838 		txs->txs_firstdesc = txq->txq_next;
   7839 		txs->txs_ndesc = segs_needed;
   7840 
   7841 		/* Set up offload parameters for this packet. */
   7842 		uint32_t cmdlen, fields, dcmdlen;
   7843 		if (m0->m_pkthdr.csum_flags &
   7844 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7845 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7846 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7847 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7848 			    &do_csum) != 0) {
   7849 				/* Error message already displayed. */
   7850 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7851 				continue;
   7852 			}
   7853 		} else {
   7854 			do_csum = false;
   7855 			cmdlen = 0;
   7856 			fields = 0;
   7857 		}
   7858 
   7859 		/* Sync the DMA map. */
   7860 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7861 		    BUS_DMASYNC_PREWRITE);
   7862 
   7863 		/* Initialize the first transmit descriptor. */
   7864 		nexttx = txq->txq_next;
   7865 		if (!do_csum) {
   7866 			/* setup a legacy descriptor */
   7867 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7868 			    dmamap->dm_segs[0].ds_addr);
   7869 			txq->txq_descs[nexttx].wtx_cmdlen =
   7870 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7871 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7872 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7873 			if (vlan_has_tag(m0)) {
   7874 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7875 				    htole32(WTX_CMD_VLE);
   7876 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7877 				    htole16(vlan_get_tag(m0));
   7878 			} else {
   7879 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7880 			}
   7881 			dcmdlen = 0;
   7882 		} else {
   7883 			/* setup an advanced data descriptor */
   7884 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7885 			    htole64(dmamap->dm_segs[0].ds_addr);
   7886 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7887 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7888 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7889 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7890 			    htole32(fields);
   7891 			DPRINTF(WM_DEBUG_TX,
   7892 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7893 			    device_xname(sc->sc_dev), nexttx,
   7894 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7895 			DPRINTF(WM_DEBUG_TX,
   7896 			    ("\t 0x%08x%08x\n", fields,
   7897 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7898 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7899 		}
   7900 
   7901 		lasttx = nexttx;
   7902 		nexttx = WM_NEXTTX(txq, nexttx);
   7903 		/*
    7904 		 * Fill in the next descriptors.  The legacy and advanced
    7905 		 * formats are laid out the same from here on.
   7906 		 */
   7907 		for (seg = 1; seg < dmamap->dm_nsegs;
   7908 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7909 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7910 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7911 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7912 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7913 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7914 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7915 			lasttx = nexttx;
   7916 
   7917 			DPRINTF(WM_DEBUG_TX,
   7918 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7919 			     "len %#04zx\n",
   7920 			    device_xname(sc->sc_dev), nexttx,
   7921 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7922 			    dmamap->dm_segs[seg].ds_len));
   7923 		}
   7924 
   7925 		KASSERT(lasttx != -1);
   7926 
   7927 		/*
   7928 		 * Set up the command byte on the last descriptor of
   7929 		 * the packet.  If we're in the interrupt delay window,
   7930 		 * delay the interrupt.
   7931 		 */
   7932 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7933 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7934 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7935 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7936 
   7937 		txs->txs_lastdesc = lasttx;
   7938 
   7939 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7940 		    device_xname(sc->sc_dev),
   7941 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7942 
   7943 		/* Sync the descriptors we're using. */
   7944 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7945 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7946 
   7947 		/* Give the packet to the chip. */
   7948 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7949 		sent = true;
   7950 
   7951 		DPRINTF(WM_DEBUG_TX,
   7952 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7953 
   7954 		DPRINTF(WM_DEBUG_TX,
   7955 		    ("%s: TX: finished transmitting packet, job %d\n",
   7956 		    device_xname(sc->sc_dev), txq->txq_snext));
   7957 
   7958 		/* Advance the tx pointer. */
   7959 		txq->txq_free -= txs->txs_ndesc;
   7960 		txq->txq_next = nexttx;
   7961 
   7962 		txq->txq_sfree--;
   7963 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7964 
   7965 		/* Pass the packet to any BPF listeners. */
   7966 		bpf_mtap(ifp, m0);
   7967 	}
   7968 
   7969 	if (m0 != NULL) {
   7970 		if (!is_transmit)
   7971 			ifp->if_flags |= IFF_OACTIVE;
   7972 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7973 		WM_Q_EVCNT_INCR(txq, txdrop);
   7974 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7975 			__func__));
   7976 		m_freem(m0);
   7977 	}
   7978 
   7979 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7980 		/* No more slots; notify upper layer. */
   7981 		if (!is_transmit)
   7982 			ifp->if_flags |= IFF_OACTIVE;
   7983 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7984 	}
   7985 
   7986 	if (sent) {
   7987 		/* Set a watchdog timer in case the chip flakes out. */
   7988 		ifp->if_timer = 5;
   7989 	}
   7990 }
   7991 
   7992 static void
   7993 wm_deferred_start_locked(struct wm_txqueue *txq)
   7994 {
   7995 	struct wm_softc *sc = txq->txq_sc;
   7996 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7997 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7998 	int qid = wmq->wmq_id;
   7999 
   8000 	KASSERT(mutex_owned(txq->txq_lock));
   8001 
   8002 	if (txq->txq_stopping) {
   8003 		mutex_exit(txq->txq_lock);
   8004 		return;
   8005 	}
   8006 
   8007 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8008 		/* XXX needed for ALTQ or single-CPU systems */
   8009 		if (qid == 0)
   8010 			wm_nq_start_locked(ifp);
   8011 		wm_nq_transmit_locked(ifp, txq);
   8012 	} else {
    8013 		/* XXX needed for ALTQ or single-CPU systems */
   8014 		if (qid == 0)
   8015 			wm_start_locked(ifp);
   8016 		wm_transmit_locked(ifp, txq);
   8017 	}
   8018 }
   8019 
   8020 /* Interrupt */
   8021 
   8022 /*
   8023  * wm_txeof:
   8024  *
   8025  *	Helper; handle transmit interrupts.
   8026  */
   8027 static int
   8028 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   8029 {
   8030 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8031 	struct wm_txsoft *txs;
   8032 	bool processed = false;
   8033 	int count = 0;
   8034 	int i;
   8035 	uint8_t status;
   8036 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8037 
   8038 	KASSERT(mutex_owned(txq->txq_lock));
   8039 
   8040 	if (txq->txq_stopping)
   8041 		return 0;
   8042 
   8043 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8044 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8045 	if (wmq->wmq_id == 0)
   8046 		ifp->if_flags &= ~IFF_OACTIVE;
   8047 
   8048 	/*
   8049 	 * Go through the Tx list and free mbufs for those
   8050 	 * frames which have been transmitted.
   8051 	 */
   8052 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8053 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8054 		txs = &txq->txq_soft[i];
   8055 
   8056 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8057 			device_xname(sc->sc_dev), i));
   8058 
   8059 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8060 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8061 
   8062 		status =
   8063 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8064 		if ((status & WTX_ST_DD) == 0) {
   8065 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8066 			    BUS_DMASYNC_PREREAD);
   8067 			break;
   8068 		}
   8069 
   8070 		processed = true;
   8071 		count++;
   8072 		DPRINTF(WM_DEBUG_TX,
   8073 		    ("%s: TX: job %d done: descs %d..%d\n",
   8074 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8075 		    txs->txs_lastdesc));
   8076 
   8077 		/*
   8078 		 * XXX We should probably be using the statistics
   8079 		 * XXX registers, but I don't know if they exist
   8080 		 * XXX on chips before the i82544.
   8081 		 */
   8082 
   8083 #ifdef WM_EVENT_COUNTERS
   8084 		if (status & WTX_ST_TU)
   8085 			WM_Q_EVCNT_INCR(txq, tu);
   8086 #endif /* WM_EVENT_COUNTERS */
   8087 
   8088 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8089 			ifp->if_oerrors++;
   8090 			if (status & WTX_ST_LC)
   8091 				log(LOG_WARNING, "%s: late collision\n",
   8092 				    device_xname(sc->sc_dev));
   8093 			else if (status & WTX_ST_EC) {
   8094 				ifp->if_collisions += 16;
   8095 				log(LOG_WARNING, "%s: excessive collisions\n",
   8096 				    device_xname(sc->sc_dev));
   8097 			}
   8098 		} else
   8099 			ifp->if_opackets++;
   8100 
   8101 		txq->txq_packets++;
   8102 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8103 
   8104 		txq->txq_free += txs->txs_ndesc;
   8105 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8106 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8107 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8108 		m_freem(txs->txs_mbuf);
   8109 		txs->txs_mbuf = NULL;
   8110 	}
   8111 
   8112 	/* Update the dirty transmit buffer pointer. */
   8113 	txq->txq_sdirty = i;
   8114 	DPRINTF(WM_DEBUG_TX,
   8115 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8116 
   8117 	if (count != 0)
   8118 		rnd_add_uint32(&sc->rnd_source, count);
   8119 
   8120 	/*
   8121 	 * If there are no more pending transmissions, cancel the watchdog
   8122 	 * timer.
   8123 	 */
   8124 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8125 		ifp->if_timer = 0;
   8126 
   8127 	return processed;
   8128 }
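
/*
 * Illustrative sketch (not part of the driver): the reclaim loop above
 * scans jobs in submission order and stops at the first one whose last
 * descriptor does not yet have the DD (descriptor done) bit set.  The
 * helper below is hypothetical, for exposition only.
 */
#if 0
static bool
wm_example_job_done(struct wm_txqueue *txq, struct wm_txsoft *txs)
{
	uint8_t st;

	/* Status byte written back by the hardware on completion. */
	st = txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
	return (st & WTX_ST_DD) != 0;
}
#endif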
   8129 
   8130 static inline uint32_t
   8131 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8132 {
   8133 	struct wm_softc *sc = rxq->rxq_sc;
   8134 
   8135 	if (sc->sc_type == WM_T_82574)
   8136 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8137 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8138 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8139 	else
   8140 		return rxq->rxq_descs[idx].wrx_status;
   8141 }
   8142 
   8143 static inline uint32_t
   8144 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8145 {
   8146 	struct wm_softc *sc = rxq->rxq_sc;
   8147 
   8148 	if (sc->sc_type == WM_T_82574)
   8149 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8150 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8151 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8152 	else
   8153 		return rxq->rxq_descs[idx].wrx_errors;
   8154 }
   8155 
   8156 static inline uint16_t
   8157 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8158 {
   8159 	struct wm_softc *sc = rxq->rxq_sc;
   8160 
   8161 	if (sc->sc_type == WM_T_82574)
   8162 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8163 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8164 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8165 	else
   8166 		return rxq->rxq_descs[idx].wrx_special;
   8167 }
   8168 
   8169 static inline int
   8170 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8171 {
   8172 	struct wm_softc *sc = rxq->rxq_sc;
   8173 
   8174 	if (sc->sc_type == WM_T_82574)
   8175 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8176 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8177 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8178 	else
   8179 		return rxq->rxq_descs[idx].wrx_len;
   8180 }
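
/*
 * Illustrative note (not part of the driver): the accessors above (and
 * below) hide three on-wire receive descriptor layouts behind one
 * interface:
 *
 *	82574			extended descriptors (rxq_ext_descs)
 *	WM_F_NEWQUEUE chips	advanced descriptors (rxq_nq_descs)
 *	everything else		legacy descriptors (rxq_descs)
 */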
   8181 
   8182 #ifdef WM_DEBUG
   8183 static inline uint32_t
   8184 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8185 {
   8186 	struct wm_softc *sc = rxq->rxq_sc;
   8187 
   8188 	if (sc->sc_type == WM_T_82574)
   8189 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8190 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8191 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8192 	else
   8193 		return 0;
   8194 }
   8195 
   8196 static inline uint8_t
   8197 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8198 {
   8199 	struct wm_softc *sc = rxq->rxq_sc;
   8200 
   8201 	if (sc->sc_type == WM_T_82574)
   8202 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8203 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8204 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8205 	else
   8206 		return 0;
   8207 }
   8208 #endif /* WM_DEBUG */
   8209 
   8210 static inline bool
   8211 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8212     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8213 {
   8214 
   8215 	if (sc->sc_type == WM_T_82574)
   8216 		return (status & ext_bit) != 0;
   8217 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8218 		return (status & nq_bit) != 0;
   8219 	else
   8220 		return (status & legacy_bit) != 0;
   8221 }
   8222 
   8223 static inline bool
   8224 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8225     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8226 {
   8227 
   8228 	if (sc->sc_type == WM_T_82574)
   8229 		return (error & ext_bit) != 0;
   8230 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8231 		return (error & nq_bit) != 0;
   8232 	else
   8233 		return (error & legacy_bit) != 0;
   8234 }
   8235 
   8236 static inline bool
   8237 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8238 {
   8239 
   8240 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8241 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8242 		return true;
   8243 	else
   8244 		return false;
   8245 }
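
/*
 * Illustrative sketch (not part of the driver): the predicates above let
 * one logical test name the status bit appropriate to each of the three
 * descriptor formats; e.g. "is a VLAN tag present?" reads as follows.
 */
#if 0
	if (wm_rxdesc_is_set_status(sc, status,
		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
		/* The descriptor carries a VLAN tag. */
	}
#endif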
   8246 
   8247 static inline bool
   8248 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8249 {
   8250 	struct wm_softc *sc = rxq->rxq_sc;
   8251 
    8252 	/* XXX missing error bit for newqueue? */
   8253 	if (wm_rxdesc_is_set_error(sc, errors,
   8254 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8255 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8256 		NQRXC_ERROR_RXE)) {
   8257 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8258 			log(LOG_WARNING, "%s: symbol error\n",
   8259 			    device_xname(sc->sc_dev));
   8260 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8261 			log(LOG_WARNING, "%s: receive sequence error\n",
   8262 			    device_xname(sc->sc_dev));
   8263 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8264 			log(LOG_WARNING, "%s: CRC error\n",
   8265 			    device_xname(sc->sc_dev));
   8266 		return true;
   8267 	}
   8268 
   8269 	return false;
   8270 }
   8271 
   8272 static inline bool
   8273 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8274 {
   8275 	struct wm_softc *sc = rxq->rxq_sc;
   8276 
   8277 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8278 		NQRXC_STATUS_DD)) {
   8279 		/* We have processed all of the receive descriptors. */
   8280 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8281 		return false;
   8282 	}
   8283 
   8284 	return true;
   8285 }
   8286 
   8287 static inline bool
   8288 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8289     struct mbuf *m)
   8290 {
   8291 
   8292 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8293 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8294 		vlan_set_tag(m, le16toh(vlantag));
   8295 	}
   8296 
   8297 	return true;
   8298 }
   8299 
   8300 static inline void
   8301 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8302     uint32_t errors, struct mbuf *m)
   8303 {
   8304 	struct wm_softc *sc = rxq->rxq_sc;
   8305 
   8306 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8307 		if (wm_rxdesc_is_set_status(sc, status,
   8308 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8309 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8310 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8311 			if (wm_rxdesc_is_set_error(sc, errors,
   8312 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8313 				m->m_pkthdr.csum_flags |=
   8314 					M_CSUM_IPv4_BAD;
   8315 		}
   8316 		if (wm_rxdesc_is_set_status(sc, status,
   8317 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8318 			/*
   8319 			 * Note: we don't know if this was TCP or UDP,
   8320 			 * so we just set both bits, and expect the
   8321 			 * upper layers to deal.
   8322 			 */
   8323 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8324 			m->m_pkthdr.csum_flags |=
   8325 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8326 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8327 			if (wm_rxdesc_is_set_error(sc, errors,
   8328 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8329 				m->m_pkthdr.csum_flags |=
   8330 					M_CSUM_TCP_UDP_BAD;
   8331 		}
   8332 	}
   8333 }
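
/*
 * Illustrative sketch (not part of the driver): a consumer of the mbuf
 * later inspects the M_CSUM_* bits that wm_rxdesc_ensure_checksum() may
 * have set, e.g.:
 */
#if 0
	if (m->m_pkthdr.csum_flags & M_CSUM_IPv4_BAD) {
		/* The hardware flagged a bad IPv4 header checksum. */
	}
#endif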
   8334 
   8335 /*
   8336  * wm_rxeof:
   8337  *
   8338  *	Helper; handle receive interrupts.
   8339  */
   8340 static void
   8341 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8342 {
   8343 	struct wm_softc *sc = rxq->rxq_sc;
   8344 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8345 	struct wm_rxsoft *rxs;
   8346 	struct mbuf *m;
   8347 	int i, len;
   8348 	int count = 0;
   8349 	uint32_t status, errors;
   8350 	uint16_t vlantag;
   8351 
   8352 	KASSERT(mutex_owned(rxq->rxq_lock));
   8353 
   8354 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8355 		if (limit-- == 0) {
   8356 			rxq->rxq_ptr = i;
   8357 			break;
   8358 		}
   8359 
   8360 		rxs = &rxq->rxq_soft[i];
   8361 
   8362 		DPRINTF(WM_DEBUG_RX,
   8363 		    ("%s: RX: checking descriptor %d\n",
   8364 		    device_xname(sc->sc_dev), i));
    8365 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8366 
   8367 		status = wm_rxdesc_get_status(rxq, i);
   8368 		errors = wm_rxdesc_get_errors(rxq, i);
   8369 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8370 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8371 #ifdef WM_DEBUG
   8372 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8373 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8374 #endif
   8375 
   8376 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8377 			/*
    8378 			 * Update the receive pointer while holding rxq_lock,
    8379 			 * keeping it consistent with the counter increment.
   8380 			 */
   8381 			rxq->rxq_ptr = i;
   8382 			break;
   8383 		}
   8384 
   8385 		count++;
   8386 		if (__predict_false(rxq->rxq_discard)) {
   8387 			DPRINTF(WM_DEBUG_RX,
   8388 			    ("%s: RX: discarding contents of descriptor %d\n",
   8389 			    device_xname(sc->sc_dev), i));
   8390 			wm_init_rxdesc(rxq, i);
   8391 			if (wm_rxdesc_is_eop(rxq, status)) {
   8392 				/* Reset our state. */
   8393 				DPRINTF(WM_DEBUG_RX,
   8394 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8395 				    device_xname(sc->sc_dev)));
   8396 				rxq->rxq_discard = 0;
   8397 			}
   8398 			continue;
   8399 		}
   8400 
   8401 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8402 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8403 
   8404 		m = rxs->rxs_mbuf;
   8405 
   8406 		/*
   8407 		 * Add a new receive buffer to the ring, unless of
   8408 		 * course the length is zero. Treat the latter as a
   8409 		 * failed mapping.
   8410 		 */
   8411 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8412 			/*
   8413 			 * Failed, throw away what we've done so
   8414 			 * far, and discard the rest of the packet.
   8415 			 */
   8416 			ifp->if_ierrors++;
   8417 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8418 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8419 			wm_init_rxdesc(rxq, i);
   8420 			if (!wm_rxdesc_is_eop(rxq, status))
   8421 				rxq->rxq_discard = 1;
   8422 			if (rxq->rxq_head != NULL)
   8423 				m_freem(rxq->rxq_head);
   8424 			WM_RXCHAIN_RESET(rxq);
   8425 			DPRINTF(WM_DEBUG_RX,
   8426 			    ("%s: RX: Rx buffer allocation failed, "
   8427 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8428 			    rxq->rxq_discard ? " (discard)" : ""));
   8429 			continue;
   8430 		}
   8431 
   8432 		m->m_len = len;
   8433 		rxq->rxq_len += len;
   8434 		DPRINTF(WM_DEBUG_RX,
   8435 		    ("%s: RX: buffer at %p len %d\n",
   8436 		    device_xname(sc->sc_dev), m->m_data, len));
   8437 
   8438 		/* If this is not the end of the packet, keep looking. */
   8439 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8440 			WM_RXCHAIN_LINK(rxq, m);
   8441 			DPRINTF(WM_DEBUG_RX,
   8442 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8443 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8444 			continue;
   8445 		}
   8446 
   8447 		/*
    8448 		 * Okay, we have the entire packet now.  The chip is
    8449 		 * configured to include the FCS except on the I350 and
    8450 		 * I21[01] (not all chips can be configured to strip it),
    8451 		 * so we need to trim it.  We may also need to adjust the
    8452 		 * length of the previous mbuf in the chain if the
    8453 		 * current mbuf is too short.
    8454 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8455 		 * register is always set on the I350, so we never trim it.
   8456 		 */
   8457 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8458 		    && (sc->sc_type != WM_T_I210)
   8459 		    && (sc->sc_type != WM_T_I211)) {
   8460 			if (m->m_len < ETHER_CRC_LEN) {
   8461 				rxq->rxq_tail->m_len
   8462 				    -= (ETHER_CRC_LEN - m->m_len);
   8463 				m->m_len = 0;
   8464 			} else
   8465 				m->m_len -= ETHER_CRC_LEN;
   8466 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8467 		} else
   8468 			len = rxq->rxq_len;
   8469 
   8470 		WM_RXCHAIN_LINK(rxq, m);
   8471 
   8472 		*rxq->rxq_tailp = NULL;
   8473 		m = rxq->rxq_head;
   8474 
   8475 		WM_RXCHAIN_RESET(rxq);
   8476 
   8477 		DPRINTF(WM_DEBUG_RX,
   8478 		    ("%s: RX: have entire packet, len -> %d\n",
   8479 		    device_xname(sc->sc_dev), len));
   8480 
   8481 		/* If an error occurred, update stats and drop the packet. */
   8482 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8483 			m_freem(m);
   8484 			continue;
   8485 		}
   8486 
   8487 		/* No errors.  Receive the packet. */
   8488 		m_set_rcvif(m, ifp);
   8489 		m->m_pkthdr.len = len;
   8490 		/*
   8491 		 * TODO
    8492 		 * We should save the rsshash and rsstype in this mbuf.
   8493 		 */
   8494 		DPRINTF(WM_DEBUG_RX,
   8495 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8496 			device_xname(sc->sc_dev), rsstype, rsshash));
   8497 
   8498 		/*
   8499 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8500 		 * for us.  Associate the tag with the packet.
   8501 		 */
   8502 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8503 			continue;
   8504 
   8505 		/* Set up checksum info for this packet. */
   8506 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8507 		/*
    8508 		 * Update the receive pointer while holding rxq_lock,
    8509 		 * keeping it consistent with the counter increment.
   8510 		 */
   8511 		rxq->rxq_ptr = i;
   8512 		rxq->rxq_packets++;
   8513 		rxq->rxq_bytes += len;
   8514 		mutex_exit(rxq->rxq_lock);
   8515 
   8516 		/* Pass it on. */
   8517 		if_percpuq_enqueue(sc->sc_ipq, m);
   8518 
   8519 		mutex_enter(rxq->rxq_lock);
   8520 
   8521 		if (rxq->rxq_stopping)
   8522 			break;
   8523 	}
   8524 
   8525 	if (count != 0)
   8526 		rnd_add_uint32(&sc->rnd_source, count);
   8527 
   8528 	DPRINTF(WM_DEBUG_RX,
   8529 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8530 }
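
/*
 * Illustrative note (not part of the driver): a frame spanning several
 * receive buffers arrives as several descriptors, and only the last one
 * carries EOP.  The loop above therefore links the intermediate mbufs
 * onto rxq_head/rxq_tailp and only hands the completed chain to
 * if_percpuq_enqueue() once EOP is seen, e.g. for a three-buffer frame:
 *
 *	desc i:   !EOP -> rxq_head = m0
 *	desc i+1: !EOP -> m0->m_next = m1
 *	desc i+2:  EOP -> m1->m_next = m2, pass the chain up
 */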
   8531 
   8532 /*
   8533  * wm_linkintr_gmii:
   8534  *
   8535  *	Helper; handle link interrupts for GMII.
   8536  */
   8537 static void
   8538 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8539 {
   8540 
   8541 	KASSERT(WM_CORE_LOCKED(sc));
   8542 
   8543 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8544 		__func__));
   8545 
   8546 	if (icr & ICR_LSC) {
   8547 		uint32_t reg;
   8548 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8549 
   8550 		if ((status & STATUS_LU) != 0) {
   8551 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8552 				device_xname(sc->sc_dev),
   8553 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8554 		} else {
   8555 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8556 				device_xname(sc->sc_dev)));
   8557 		}
   8558 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8559 			wm_gig_downshift_workaround_ich8lan(sc);
   8560 
   8561 		if ((sc->sc_type == WM_T_ICH8)
   8562 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8563 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8564 		}
   8565 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8566 			device_xname(sc->sc_dev)));
   8567 		mii_pollstat(&sc->sc_mii);
   8568 		if (sc->sc_type == WM_T_82543) {
   8569 			int miistatus, active;
   8570 
   8571 			/*
   8572 			 * With 82543, we need to force speed and
   8573 			 * duplex on the MAC equal to what the PHY
   8574 			 * speed and duplex configuration is.
   8575 			 */
   8576 			miistatus = sc->sc_mii.mii_media_status;
   8577 
   8578 			if (miistatus & IFM_ACTIVE) {
   8579 				active = sc->sc_mii.mii_media_active;
   8580 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8581 				switch (IFM_SUBTYPE(active)) {
   8582 				case IFM_10_T:
   8583 					sc->sc_ctrl |= CTRL_SPEED_10;
   8584 					break;
   8585 				case IFM_100_TX:
   8586 					sc->sc_ctrl |= CTRL_SPEED_100;
   8587 					break;
   8588 				case IFM_1000_T:
   8589 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8590 					break;
   8591 				default:
   8592 					/*
   8593 					 * fiber?
    8594 					 * Should not enter here.
   8595 					 */
   8596 					printf("unknown media (%x)\n", active);
   8597 					break;
   8598 				}
   8599 				if (active & IFM_FDX)
   8600 					sc->sc_ctrl |= CTRL_FD;
   8601 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8602 			}
   8603 		} else if (sc->sc_type == WM_T_PCH) {
   8604 			wm_k1_gig_workaround_hv(sc,
   8605 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8606 		}
   8607 
   8608 		if ((sc->sc_phytype == WMPHY_82578)
   8609 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8610 			== IFM_1000_T)) {
   8611 
   8612 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8613 				delay(200*1000); /* XXX too big */
   8614 
   8615 				/* Link stall fix for link up */
   8616 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8617 				    HV_MUX_DATA_CTRL,
   8618 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8619 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8620 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8621 				    HV_MUX_DATA_CTRL,
   8622 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8623 			}
   8624 		}
   8625 		/*
   8626 		 * I217 Packet Loss issue:
   8627 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8628 		 * on power up.
   8629 		 * Set the Beacon Duration for I217 to 8 usec
   8630 		 */
   8631 		if ((sc->sc_type == WM_T_PCH_LPT)
   8632 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8633 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8634 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8635 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8636 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8637 		}
   8638 
   8639 		/* XXX Work-around I218 hang issue */
   8640 		/* e1000_k1_workaround_lpt_lp() */
   8641 
   8642 		if ((sc->sc_type == WM_T_PCH_LPT)
   8643 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8644 			/*
   8645 			 * Set platform power management values for Latency
   8646 			 * Tolerance Reporting (LTR)
   8647 			 */
   8648 			wm_platform_pm_pch_lpt(sc,
   8649 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8650 				    != 0));
   8651 		}
   8652 
   8653 		/* FEXTNVM6 K1-off workaround */
   8654 		if (sc->sc_type == WM_T_PCH_SPT) {
   8655 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8656 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8657 			    & FEXTNVM6_K1_OFF_ENABLE)
   8658 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8659 			else
   8660 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8661 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8662 		}
   8663 	} else if (icr & ICR_RXSEQ) {
   8664 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8665 			device_xname(sc->sc_dev)));
   8666 	}
   8667 }
   8668 
   8669 /*
   8670  * wm_linkintr_tbi:
   8671  *
   8672  *	Helper; handle link interrupts for TBI mode.
   8673  */
   8674 static void
   8675 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8676 {
   8677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8678 	uint32_t status;
   8679 
   8680 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8681 		__func__));
   8682 
   8683 	status = CSR_READ(sc, WMREG_STATUS);
   8684 	if (icr & ICR_LSC) {
   8685 		if (status & STATUS_LU) {
   8686 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8687 			    device_xname(sc->sc_dev),
   8688 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8689 			/*
   8690 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8691 			 * so we should update sc->sc_ctrl
   8692 			 */
   8693 
   8694 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8695 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8696 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8697 			if (status & STATUS_FD)
   8698 				sc->sc_tctl |=
   8699 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8700 			else
   8701 				sc->sc_tctl |=
   8702 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8703 			if (sc->sc_ctrl & CTRL_TFCE)
   8704 				sc->sc_fcrtl |= FCRTL_XONE;
   8705 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8706 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8707 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8708 				      sc->sc_fcrtl);
   8709 			sc->sc_tbi_linkup = 1;
   8710 			if_link_state_change(ifp, LINK_STATE_UP);
   8711 		} else {
   8712 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8713 			    device_xname(sc->sc_dev)));
   8714 			sc->sc_tbi_linkup = 0;
   8715 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8716 		}
   8717 		/* Update LED */
   8718 		wm_tbi_serdes_set_linkled(sc);
   8719 	} else if (icr & ICR_RXSEQ) {
   8720 		DPRINTF(WM_DEBUG_LINK,
   8721 		    ("%s: LINK: Receive sequence error\n",
   8722 		    device_xname(sc->sc_dev)));
   8723 	}
   8724 }
   8725 
   8726 /*
   8727  * wm_linkintr_serdes:
   8728  *
    8729  *	Helper; handle link interrupts for SERDES mode.
   8730  */
   8731 static void
   8732 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8733 {
   8734 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8735 	struct mii_data *mii = &sc->sc_mii;
   8736 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8737 	uint32_t pcs_adv, pcs_lpab, reg;
   8738 
   8739 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8740 		__func__));
   8741 
   8742 	if (icr & ICR_LSC) {
   8743 		/* Check PCS */
   8744 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8745 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8746 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8747 				device_xname(sc->sc_dev)));
   8748 			mii->mii_media_status |= IFM_ACTIVE;
   8749 			sc->sc_tbi_linkup = 1;
   8750 			if_link_state_change(ifp, LINK_STATE_UP);
   8751 		} else {
   8752 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8753 				device_xname(sc->sc_dev)));
   8754 			mii->mii_media_status |= IFM_NONE;
   8755 			sc->sc_tbi_linkup = 0;
   8756 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8757 			wm_tbi_serdes_set_linkled(sc);
   8758 			return;
   8759 		}
   8760 		mii->mii_media_active |= IFM_1000_SX;
   8761 		if ((reg & PCS_LSTS_FDX) != 0)
   8762 			mii->mii_media_active |= IFM_FDX;
   8763 		else
   8764 			mii->mii_media_active |= IFM_HDX;
   8765 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8766 			/* Check flow */
   8767 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8768 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8769 				DPRINTF(WM_DEBUG_LINK,
   8770 				    ("XXX LINKOK but not ACOMP\n"));
   8771 				return;
   8772 			}
   8773 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8774 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8775 			DPRINTF(WM_DEBUG_LINK,
   8776 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8777 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8778 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8779 				mii->mii_media_active |= IFM_FLOW
   8780 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8781 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8782 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8783 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8784 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8785 				mii->mii_media_active |= IFM_FLOW
   8786 				    | IFM_ETH_TXPAUSE;
   8787 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8788 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8789 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8790 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8791 				mii->mii_media_active |= IFM_FLOW
   8792 				    | IFM_ETH_RXPAUSE;
   8793 		}
   8794 		/* Update LED */
   8795 		wm_tbi_serdes_set_linkled(sc);
   8796 	} else {
   8797 		DPRINTF(WM_DEBUG_LINK,
   8798 		    ("%s: LINK: Receive sequence error\n",
   8799 		    device_xname(sc->sc_dev)));
   8800 	}
   8801 }
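
/*
 * Illustrative note (not part of the driver): the pause resolution above
 * follows the usual 802.3 priority table for the local advertisement
 * (pcs_adv) and link partner ability (pcs_lpab) SYM/ASYM pause bits:
 *
 *	adv SYM	adv ASYM  lp SYM  lp ASYM  ->  result
 *	   1	    x	     1	     x		TX and RX pause
 *	   0	    1	     1	     1		TX pause only
 *	   1	    1	     0	     1		RX pause only
 */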
   8802 
   8803 /*
   8804  * wm_linkintr:
   8805  *
   8806  *	Helper; handle link interrupts.
   8807  */
   8808 static void
   8809 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8810 {
   8811 
   8812 	KASSERT(WM_CORE_LOCKED(sc));
   8813 
   8814 	if (sc->sc_flags & WM_F_HAS_MII)
   8815 		wm_linkintr_gmii(sc, icr);
   8816 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8817 	    && (sc->sc_type >= WM_T_82575))
   8818 		wm_linkintr_serdes(sc, icr);
   8819 	else
   8820 		wm_linkintr_tbi(sc, icr);
   8821 }
   8822 
   8823 /*
   8824  * wm_intr_legacy:
   8825  *
   8826  *	Interrupt service routine for INTx and MSI.
   8827  */
   8828 static int
   8829 wm_intr_legacy(void *arg)
   8830 {
   8831 	struct wm_softc *sc = arg;
   8832 	struct wm_queue *wmq = &sc->sc_queue[0];
   8833 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8834 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8835 	uint32_t icr, rndval = 0;
   8836 	int handled = 0;
   8837 
   8838 	while (1 /* CONSTCOND */) {
   8839 		icr = CSR_READ(sc, WMREG_ICR);
   8840 		if ((icr & sc->sc_icr) == 0)
   8841 			break;
   8842 		if (handled == 0) {
   8843 			DPRINTF(WM_DEBUG_TX,
    8844 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8845 		}
   8846 		if (rndval == 0)
   8847 			rndval = icr;
   8848 
   8849 		mutex_enter(rxq->rxq_lock);
   8850 
   8851 		if (rxq->rxq_stopping) {
   8852 			mutex_exit(rxq->rxq_lock);
   8853 			break;
   8854 		}
   8855 
   8856 		handled = 1;
   8857 
   8858 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8859 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8860 			DPRINTF(WM_DEBUG_RX,
   8861 			    ("%s: RX: got Rx intr 0x%08x\n",
   8862 			    device_xname(sc->sc_dev),
   8863 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8864 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8865 		}
   8866 #endif
   8867 		/*
   8868 		 * wm_rxeof() does *not* call upper layer functions directly,
    8869 		 * as if_percpuq_enqueue() just calls softint_schedule().
   8870 		 * So, we can call wm_rxeof() in interrupt context.
   8871 		 */
   8872 		wm_rxeof(rxq, UINT_MAX);
   8873 
   8874 		mutex_exit(rxq->rxq_lock);
   8875 		mutex_enter(txq->txq_lock);
   8876 
   8877 		if (txq->txq_stopping) {
   8878 			mutex_exit(txq->txq_lock);
   8879 			break;
   8880 		}
   8881 
   8882 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8883 		if (icr & ICR_TXDW) {
   8884 			DPRINTF(WM_DEBUG_TX,
   8885 			    ("%s: TX: got TXDW interrupt\n",
   8886 			    device_xname(sc->sc_dev)));
   8887 			WM_Q_EVCNT_INCR(txq, txdw);
   8888 		}
   8889 #endif
   8890 		wm_txeof(sc, txq);
   8891 
   8892 		mutex_exit(txq->txq_lock);
   8893 		WM_CORE_LOCK(sc);
   8894 
   8895 		if (sc->sc_core_stopping) {
   8896 			WM_CORE_UNLOCK(sc);
   8897 			break;
   8898 		}
   8899 
   8900 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8901 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8902 			wm_linkintr(sc, icr);
   8903 		}
   8904 
   8905 		WM_CORE_UNLOCK(sc);
   8906 
   8907 		if (icr & ICR_RXO) {
   8908 #if defined(WM_DEBUG)
   8909 			log(LOG_WARNING, "%s: Receive overrun\n",
   8910 			    device_xname(sc->sc_dev));
   8911 #endif /* defined(WM_DEBUG) */
   8912 		}
   8913 	}
   8914 
   8915 	rnd_add_uint32(&sc->rnd_source, rndval);
   8916 
   8917 	if (handled) {
   8918 		/* Try to get more packets going. */
   8919 		softint_schedule(wmq->wmq_si);
   8920 	}
   8921 
   8922 	return handled;
   8923 }
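
/*
 * Illustrative note (not part of the driver): on these devices reading
 * ICR clears the asserted cause bits, so the loop above re-reads ICR
 * until it comes back empty; causes that arrive while one iteration is
 * being processed are picked up by the next read instead of being lost.
 */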
   8924 
   8925 static inline void
   8926 wm_txrxintr_disable(struct wm_queue *wmq)
   8927 {
   8928 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8929 
   8930 	if (sc->sc_type == WM_T_82574)
   8931 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8932 	else if (sc->sc_type == WM_T_82575)
   8933 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8934 	else
   8935 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8936 }
   8937 
   8938 static inline void
   8939 wm_txrxintr_enable(struct wm_queue *wmq)
   8940 {
   8941 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8942 
   8943 	wm_itrs_calculate(sc, wmq);
   8944 
   8945 	if (sc->sc_type == WM_T_82574)
   8946 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8947 	else if (sc->sc_type == WM_T_82575)
   8948 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8949 	else
   8950 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8951 }
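
/*
 * Illustrative sketch (not part of the driver): the per-queue MSI-X flow
 * is "mask in the ISR, process in softint context, unmask when done":
 */
#if 0
	wm_txrxintr_disable(wmq);	/* top of wm_txrxintr_msix() */
	/* ... wm_txeof()/wm_rxeof() under the queue locks ... */
	wm_txrxintr_enable(wmq);	/* end of wm_handle_queue() */
#endif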
   8952 
   8953 static int
   8954 wm_txrxintr_msix(void *arg)
   8955 {
   8956 	struct wm_queue *wmq = arg;
   8957 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8958 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8959 	struct wm_softc *sc = txq->txq_sc;
   8960 	u_int limit = sc->sc_rx_intr_process_limit;
   8961 
   8962 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8963 
   8964 	DPRINTF(WM_DEBUG_TX,
   8965 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8966 
   8967 	wm_txrxintr_disable(wmq);
   8968 
   8969 	mutex_enter(txq->txq_lock);
   8970 
   8971 	if (txq->txq_stopping) {
   8972 		mutex_exit(txq->txq_lock);
   8973 		return 0;
   8974 	}
   8975 
   8976 	WM_Q_EVCNT_INCR(txq, txdw);
   8977 	wm_txeof(sc, txq);
    8978 	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
   8979 	mutex_exit(txq->txq_lock);
   8980 
   8981 	DPRINTF(WM_DEBUG_RX,
   8982 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8983 	mutex_enter(rxq->rxq_lock);
   8984 
   8985 	if (rxq->rxq_stopping) {
   8986 		mutex_exit(rxq->rxq_lock);
   8987 		return 0;
   8988 	}
   8989 
   8990 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8991 	wm_rxeof(rxq, limit);
   8992 	mutex_exit(rxq->rxq_lock);
   8993 
   8994 	wm_itrs_writereg(sc, wmq);
   8995 
   8996 	softint_schedule(wmq->wmq_si);
   8997 
   8998 	return 1;
   8999 }
   9000 
   9001 static void
   9002 wm_handle_queue(void *arg)
   9003 {
   9004 	struct wm_queue *wmq = arg;
   9005 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9006 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9007 	struct wm_softc *sc = txq->txq_sc;
   9008 	u_int limit = sc->sc_rx_process_limit;
   9009 
   9010 	mutex_enter(txq->txq_lock);
   9011 	if (txq->txq_stopping) {
   9012 		mutex_exit(txq->txq_lock);
   9013 		return;
   9014 	}
   9015 	wm_txeof(sc, txq);
   9016 	wm_deferred_start_locked(txq);
   9017 	mutex_exit(txq->txq_lock);
   9018 
   9019 	mutex_enter(rxq->rxq_lock);
   9020 	if (rxq->rxq_stopping) {
   9021 		mutex_exit(rxq->rxq_lock);
   9022 		return;
   9023 	}
   9024 	WM_Q_EVCNT_INCR(rxq, rxintr);
   9025 	wm_rxeof(rxq, limit);
   9026 	mutex_exit(rxq->rxq_lock);
   9027 
   9028 	wm_txrxintr_enable(wmq);
   9029 }
   9030 
   9031 /*
   9032  * wm_linkintr_msix:
   9033  *
   9034  *	Interrupt service routine for link status change for MSI-X.
   9035  */
   9036 static int
   9037 wm_linkintr_msix(void *arg)
   9038 {
   9039 	struct wm_softc *sc = arg;
   9040 	uint32_t reg;
   9041 
   9042 	DPRINTF(WM_DEBUG_LINK,
   9043 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9044 
   9045 	reg = CSR_READ(sc, WMREG_ICR);
   9046 	WM_CORE_LOCK(sc);
   9047 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   9048 		goto out;
   9049 
   9050 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9051 	wm_linkintr(sc, ICR_LSC);
   9052 
   9053 out:
   9054 	WM_CORE_UNLOCK(sc);
   9055 
   9056 	if (sc->sc_type == WM_T_82574)
   9057 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9058 	else if (sc->sc_type == WM_T_82575)
   9059 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9060 	else
   9061 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9062 
   9063 	return 1;
   9064 }
   9065 
   9066 /*
   9067  * Media related.
   9068  * GMII, SGMII, TBI (and SERDES)
   9069  */
   9070 
   9071 /* Common */
   9072 
   9073 /*
   9074  * wm_tbi_serdes_set_linkled:
   9075  *
   9076  *	Update the link LED on TBI and SERDES devices.
   9077  */
   9078 static void
   9079 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9080 {
   9081 
   9082 	if (sc->sc_tbi_linkup)
   9083 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9084 	else
   9085 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9086 
   9087 	/* 82540 or newer devices are active low */
   9088 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9089 
   9090 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9091 }
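
/*
 * Illustrative note (not part of the driver): the XOR above implements
 * the active-low polarity of the LED pin on 82540 and newer devices,
 * i.e. for CTRL_SWDPIN(0):
 *
 *	sc_tbi_linkup	< 82540		>= 82540
 *	      0		   0		   1
 *	      1		   1		   0
 */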
   9092 
   9093 /* GMII related */
   9094 
   9095 /*
   9096  * wm_gmii_reset:
   9097  *
   9098  *	Reset the PHY.
   9099  */
   9100 static void
   9101 wm_gmii_reset(struct wm_softc *sc)
   9102 {
   9103 	uint32_t reg;
   9104 	int rv;
   9105 
   9106 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9107 		device_xname(sc->sc_dev), __func__));
   9108 
   9109 	rv = sc->phy.acquire(sc);
   9110 	if (rv != 0) {
   9111 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9112 		    __func__);
   9113 		return;
   9114 	}
   9115 
   9116 	switch (sc->sc_type) {
   9117 	case WM_T_82542_2_0:
   9118 	case WM_T_82542_2_1:
   9119 		/* null */
   9120 		break;
   9121 	case WM_T_82543:
   9122 		/*
   9123 		 * With 82543, we need to force speed and duplex on the MAC
   9124 		 * equal to what the PHY speed and duplex configuration is.
   9125 		 * In addition, we need to perform a hardware reset on the PHY
   9126 		 * to take it out of reset.
   9127 		 */
   9128 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9129 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9130 
   9131 		/* The PHY reset pin is active-low. */
   9132 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9133 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9134 		    CTRL_EXT_SWDPIN(4));
   9135 		reg |= CTRL_EXT_SWDPIO(4);
   9136 
   9137 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9138 		CSR_WRITE_FLUSH(sc);
   9139 		delay(10*1000);
   9140 
   9141 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9142 		CSR_WRITE_FLUSH(sc);
   9143 		delay(150);
   9144 #if 0
   9145 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9146 #endif
   9147 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9148 		break;
   9149 	case WM_T_82544:	/* reset 10000us */
   9150 	case WM_T_82540:
   9151 	case WM_T_82545:
   9152 	case WM_T_82545_3:
   9153 	case WM_T_82546:
   9154 	case WM_T_82546_3:
   9155 	case WM_T_82541:
   9156 	case WM_T_82541_2:
   9157 	case WM_T_82547:
   9158 	case WM_T_82547_2:
   9159 	case WM_T_82571:	/* reset 100us */
   9160 	case WM_T_82572:
   9161 	case WM_T_82573:
   9162 	case WM_T_82574:
   9163 	case WM_T_82575:
   9164 	case WM_T_82576:
   9165 	case WM_T_82580:
   9166 	case WM_T_I350:
   9167 	case WM_T_I354:
   9168 	case WM_T_I210:
   9169 	case WM_T_I211:
   9170 	case WM_T_82583:
   9171 	case WM_T_80003:
   9172 		/* generic reset */
   9173 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9174 		CSR_WRITE_FLUSH(sc);
   9175 		delay(20000);
   9176 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9177 		CSR_WRITE_FLUSH(sc);
   9178 		delay(20000);
   9179 
   9180 		if ((sc->sc_type == WM_T_82541)
   9181 		    || (sc->sc_type == WM_T_82541_2)
   9182 		    || (sc->sc_type == WM_T_82547)
   9183 		    || (sc->sc_type == WM_T_82547_2)) {
    9184 			/* Workarounds for IGP are done in igp_reset() */
   9185 			/* XXX add code to set LED after phy reset */
   9186 		}
   9187 		break;
   9188 	case WM_T_ICH8:
   9189 	case WM_T_ICH9:
   9190 	case WM_T_ICH10:
   9191 	case WM_T_PCH:
   9192 	case WM_T_PCH2:
   9193 	case WM_T_PCH_LPT:
   9194 	case WM_T_PCH_SPT:
   9195 		/* generic reset */
   9196 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9197 		CSR_WRITE_FLUSH(sc);
   9198 		delay(100);
   9199 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9200 		CSR_WRITE_FLUSH(sc);
   9201 		delay(150);
   9202 		break;
   9203 	default:
   9204 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9205 		    __func__);
   9206 		break;
   9207 	}
   9208 
   9209 	sc->phy.release(sc);
   9210 
   9211 	/* get_cfg_done */
   9212 	wm_get_cfg_done(sc);
   9213 
   9214 	/* extra setup */
   9215 	switch (sc->sc_type) {
   9216 	case WM_T_82542_2_0:
   9217 	case WM_T_82542_2_1:
   9218 	case WM_T_82543:
   9219 	case WM_T_82544:
   9220 	case WM_T_82540:
   9221 	case WM_T_82545:
   9222 	case WM_T_82545_3:
   9223 	case WM_T_82546:
   9224 	case WM_T_82546_3:
   9225 	case WM_T_82541_2:
   9226 	case WM_T_82547_2:
   9227 	case WM_T_82571:
   9228 	case WM_T_82572:
   9229 	case WM_T_82573:
   9230 	case WM_T_82574:
   9231 	case WM_T_82583:
   9232 	case WM_T_82575:
   9233 	case WM_T_82576:
   9234 	case WM_T_82580:
   9235 	case WM_T_I350:
   9236 	case WM_T_I354:
   9237 	case WM_T_I210:
   9238 	case WM_T_I211:
   9239 	case WM_T_80003:
   9240 		/* null */
   9241 		break;
   9242 	case WM_T_82541:
   9243 	case WM_T_82547:
    9244 		/* XXX Actively configure the LED after PHY reset */
   9245 		break;
   9246 	case WM_T_ICH8:
   9247 	case WM_T_ICH9:
   9248 	case WM_T_ICH10:
   9249 	case WM_T_PCH:
   9250 	case WM_T_PCH2:
   9251 	case WM_T_PCH_LPT:
   9252 	case WM_T_PCH_SPT:
   9253 		wm_phy_post_reset(sc);
   9254 		break;
   9255 	default:
   9256 		panic("%s: unknown type\n", __func__);
   9257 		break;
   9258 	}
   9259 }
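
/*
 * Illustrative sketch (not part of the driver): as in wm_gmii_reset()
 * above, every PHY access path brackets its register work with the
 * semaphore.  The helper below is hypothetical, for exposition only.
 */
#if 0
static int
wm_example_phy_op(struct wm_softc *sc)
{
	int rv;

	if ((rv = sc->phy.acquire(sc)) != 0)
		return rv;		/* don't touch the PHY */
	/* ... PHY register reads/writes ... */
	sc->phy.release(sc);
	return 0;
}
#endif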
   9260 
   9261 /*
    9262  * Set up sc_phytype and mii_{read|write}reg.
    9263  *
    9264  *  To identify the PHY type, the correct read/write functions must be
    9265  * selected, and to select them the PCI ID or MAC type is needed
    9266  * without accessing any PHY registers.
    9267  *
    9268  *  On the first call of this function, the PHY ID is not yet known, so
    9269  * check the PCI ID or MAC type.  The list of PCI IDs may not be
    9270  * complete, so the result might be incorrect.
    9271  *
    9272  *  On the second call, the PHY OUI and model are used to identify the
    9273  * PHY type.  The result might still be imperfect because of missing
    9274  * comparison entries, but it is better than that of the first call.
    9275  *
    9276  *  If the newly detected result differs from the previous assumption,
    9277  * a diagnostic message is printed.
   9278  */
   9279 static void
   9280 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9281     uint16_t phy_model)
   9282 {
   9283 	device_t dev = sc->sc_dev;
   9284 	struct mii_data *mii = &sc->sc_mii;
   9285 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9286 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9287 	mii_readreg_t new_readreg;
   9288 	mii_writereg_t new_writereg;
   9289 
   9290 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9291 		device_xname(sc->sc_dev), __func__));
   9292 
   9293 	if (mii->mii_readreg == NULL) {
   9294 		/*
   9295 		 *  This is the first call of this function. For ICH and PCH
   9296 		 * variants, it's difficult to determine the PHY access method
   9297 		 * by sc_type, so use the PCI product ID for some devices.
   9298 		 */
   9299 
   9300 		switch (sc->sc_pcidevid) {
   9301 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9302 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9303 			/* 82577 */
   9304 			new_phytype = WMPHY_82577;
   9305 			break;
   9306 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9307 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9308 			/* 82578 */
   9309 			new_phytype = WMPHY_82578;
   9310 			break;
   9311 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9312 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9313 			/* 82579 */
   9314 			new_phytype = WMPHY_82579;
   9315 			break;
   9316 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9317 		case PCI_PRODUCT_INTEL_82801I_BM:
   9318 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9319 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9320 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9321 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9322 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9323 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9324 			/* ICH8, 9, 10 with 82567 */
   9325 			new_phytype = WMPHY_BM;
   9326 			break;
   9327 		default:
   9328 			break;
   9329 		}
   9330 	} else {
   9331 		/* It's not the first call. Use PHY OUI and model */
   9332 		switch (phy_oui) {
   9333 		case MII_OUI_ATHEROS: /* XXX ??? */
   9334 			switch (phy_model) {
   9335 			case 0x0004: /* XXX */
   9336 				new_phytype = WMPHY_82578;
   9337 				break;
   9338 			default:
   9339 				break;
   9340 			}
   9341 			break;
   9342 		case MII_OUI_xxMARVELL:
   9343 			switch (phy_model) {
   9344 			case MII_MODEL_xxMARVELL_I210:
   9345 				new_phytype = WMPHY_I210;
   9346 				break;
   9347 			case MII_MODEL_xxMARVELL_E1011:
   9348 			case MII_MODEL_xxMARVELL_E1000_3:
   9349 			case MII_MODEL_xxMARVELL_E1000_5:
   9350 			case MII_MODEL_xxMARVELL_E1112:
   9351 				new_phytype = WMPHY_M88;
   9352 				break;
   9353 			case MII_MODEL_xxMARVELL_E1149:
   9354 				new_phytype = WMPHY_BM;
   9355 				break;
   9356 			case MII_MODEL_xxMARVELL_E1111:
   9357 			case MII_MODEL_xxMARVELL_I347:
   9358 			case MII_MODEL_xxMARVELL_E1512:
   9359 			case MII_MODEL_xxMARVELL_E1340M:
   9360 			case MII_MODEL_xxMARVELL_E1543:
   9361 				new_phytype = WMPHY_M88;
   9362 				break;
   9363 			case MII_MODEL_xxMARVELL_I82563:
   9364 				new_phytype = WMPHY_GG82563;
   9365 				break;
   9366 			default:
   9367 				break;
   9368 			}
   9369 			break;
   9370 		case MII_OUI_INTEL:
   9371 			switch (phy_model) {
   9372 			case MII_MODEL_INTEL_I82577:
   9373 				new_phytype = WMPHY_82577;
   9374 				break;
   9375 			case MII_MODEL_INTEL_I82579:
   9376 				new_phytype = WMPHY_82579;
   9377 				break;
   9378 			case MII_MODEL_INTEL_I217:
   9379 				new_phytype = WMPHY_I217;
   9380 				break;
   9381 			case MII_MODEL_INTEL_I82580:
   9382 			case MII_MODEL_INTEL_I350:
   9383 				new_phytype = WMPHY_82580;
   9384 				break;
   9385 			default:
   9386 				break;
   9387 			}
   9388 			break;
   9389 		case MII_OUI_yyINTEL:
   9390 			switch (phy_model) {
   9391 			case MII_MODEL_yyINTEL_I82562G:
   9392 			case MII_MODEL_yyINTEL_I82562EM:
   9393 			case MII_MODEL_yyINTEL_I82562ET:
   9394 				new_phytype = WMPHY_IFE;
   9395 				break;
   9396 			case MII_MODEL_yyINTEL_IGP01E1000:
   9397 				new_phytype = WMPHY_IGP;
   9398 				break;
   9399 			case MII_MODEL_yyINTEL_I82566:
   9400 				new_phytype = WMPHY_IGP_3;
   9401 				break;
   9402 			default:
   9403 				break;
   9404 			}
   9405 			break;
   9406 		default:
   9407 			break;
   9408 		}
   9409 		if (new_phytype == WMPHY_UNKNOWN)
   9410 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9411 			    __func__);
   9412 
   9413 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9414 		    && (sc->sc_phytype != new_phytype)) {
    9415 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    9416 			    "was incorrect. PHY type from PHY ID = %u\n",
   9417 			    sc->sc_phytype, new_phytype);
   9418 		}
   9419 	}
   9420 
   9421 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9422 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9423 		/* SGMII */
   9424 		new_readreg = wm_sgmii_readreg;
   9425 		new_writereg = wm_sgmii_writereg;
   9426 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9427 		/* BM2 (phyaddr == 1) */
   9428 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9429 		    && (new_phytype != WMPHY_BM)
   9430 		    && (new_phytype != WMPHY_UNKNOWN))
   9431 			doubt_phytype = new_phytype;
   9432 		new_phytype = WMPHY_BM;
   9433 		new_readreg = wm_gmii_bm_readreg;
   9434 		new_writereg = wm_gmii_bm_writereg;
   9435 	} else if (sc->sc_type >= WM_T_PCH) {
   9436 		/* All PCH* use _hv_ */
   9437 		new_readreg = wm_gmii_hv_readreg;
   9438 		new_writereg = wm_gmii_hv_writereg;
   9439 	} else if (sc->sc_type >= WM_T_ICH8) {
   9440 		/* non-82567 ICH8, 9 and 10 */
   9441 		new_readreg = wm_gmii_i82544_readreg;
   9442 		new_writereg = wm_gmii_i82544_writereg;
   9443 	} else if (sc->sc_type >= WM_T_80003) {
   9444 		/* 80003 */
   9445 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9446 		    && (new_phytype != WMPHY_GG82563)
   9447 		    && (new_phytype != WMPHY_UNKNOWN))
   9448 			doubt_phytype = new_phytype;
   9449 		new_phytype = WMPHY_GG82563;
   9450 		new_readreg = wm_gmii_i80003_readreg;
   9451 		new_writereg = wm_gmii_i80003_writereg;
   9452 	} else if (sc->sc_type >= WM_T_I210) {
   9453 		/* I210 and I211 */
   9454 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9455 		    && (new_phytype != WMPHY_I210)
   9456 		    && (new_phytype != WMPHY_UNKNOWN))
   9457 			doubt_phytype = new_phytype;
   9458 		new_phytype = WMPHY_I210;
   9459 		new_readreg = wm_gmii_gs40g_readreg;
   9460 		new_writereg = wm_gmii_gs40g_writereg;
   9461 	} else if (sc->sc_type >= WM_T_82580) {
   9462 		/* 82580, I350 and I354 */
   9463 		new_readreg = wm_gmii_82580_readreg;
   9464 		new_writereg = wm_gmii_82580_writereg;
   9465 	} else if (sc->sc_type >= WM_T_82544) {
    9466 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9467 		new_readreg = wm_gmii_i82544_readreg;
   9468 		new_writereg = wm_gmii_i82544_writereg;
   9469 	} else {
   9470 		new_readreg = wm_gmii_i82543_readreg;
   9471 		new_writereg = wm_gmii_i82543_writereg;
   9472 	}
   9473 
   9474 	if (new_phytype == WMPHY_BM) {
   9475 		/* All BM use _bm_ */
   9476 		new_readreg = wm_gmii_bm_readreg;
   9477 		new_writereg = wm_gmii_bm_writereg;
   9478 	}
   9479 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9480 		/* All PCH* use _hv_ */
   9481 		new_readreg = wm_gmii_hv_readreg;
   9482 		new_writereg = wm_gmii_hv_writereg;
   9483 	}
   9484 
   9485 	/* Diag output */
   9486 	if (doubt_phytype != WMPHY_UNKNOWN)
   9487 		aprint_error_dev(dev, "Assumed new PHY type was "
   9488 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9489 		    new_phytype);
   9490 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9491 	    && (sc->sc_phytype != new_phytype))
    9492 		aprint_error_dev(dev, "Previously assumed PHY type(%u) "
   9493 		    "was incorrect. New PHY type = %u\n",
   9494 		    sc->sc_phytype, new_phytype);
   9495 
   9496 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9497 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9498 
   9499 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9500 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9501 		    "function was incorrect.\n");
   9502 
   9503 	/* Update now */
   9504 	sc->sc_phytype = new_phytype;
   9505 	mii->mii_readreg = new_readreg;
   9506 	mii->mii_writereg = new_writereg;
   9507 }
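
/*
 * Illustrative sketch (not part of the driver): the two-call protocol
 * described above looks roughly like this from the caller's side (the
 * exact call sites and arguments here are assumptions, not quotes):
 */
#if 0
	wm_gmii_setup_phytype(sc, 0, 0);   /* 1st call: guess from PCI ID */
	/* ... mii_attach() probes the PHY and yields its OUI/model ... */
	wm_gmii_setup_phytype(sc, phy_oui, phy_model);	/* 2nd: refine */
#endif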
   9508 
   9509 /*
   9510  * wm_get_phy_id_82575:
   9511  *
   9512  * Return PHY ID. Return -1 if it failed.
   9513  */
   9514 static int
   9515 wm_get_phy_id_82575(struct wm_softc *sc)
   9516 {
   9517 	uint32_t reg;
   9518 	int phyid = -1;
   9519 
   9520 	/* XXX */
   9521 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9522 		return -1;
   9523 
   9524 	if (wm_sgmii_uses_mdio(sc)) {
   9525 		switch (sc->sc_type) {
   9526 		case WM_T_82575:
   9527 		case WM_T_82576:
   9528 			reg = CSR_READ(sc, WMREG_MDIC);
   9529 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9530 			break;
   9531 		case WM_T_82580:
   9532 		case WM_T_I350:
   9533 		case WM_T_I354:
   9534 		case WM_T_I210:
   9535 		case WM_T_I211:
   9536 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9537 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9538 			break;
   9539 		default:
   9540 			return -1;
   9541 		}
   9542 	}
   9543 
   9544 	return phyid;
   9545 }
   9546 
   9547 
   9548 /*
   9549  * wm_gmii_mediainit:
   9550  *
   9551  *	Initialize media for use on 1000BASE-T devices.
   9552  */
   9553 static void
   9554 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9555 {
   9556 	device_t dev = sc->sc_dev;
   9557 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9558 	struct mii_data *mii = &sc->sc_mii;
   9559 	uint32_t reg;
   9560 
   9561 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9562 		device_xname(sc->sc_dev), __func__));
   9563 
   9564 	/* We have GMII. */
   9565 	sc->sc_flags |= WM_F_HAS_MII;
   9566 
   9567 	if (sc->sc_type == WM_T_80003)
   9568 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9569 	else
   9570 		sc->sc_tipg = TIPG_1000T_DFLT;
   9571 
   9572 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9573 	if ((sc->sc_type == WM_T_82580)
   9574 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9575 	    || (sc->sc_type == WM_T_I211)) {
   9576 		reg = CSR_READ(sc, WMREG_PHPM);
   9577 		reg &= ~PHPM_GO_LINK_D;
   9578 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9579 	}
   9580 
   9581 	/*
   9582 	 * Let the chip set speed/duplex on its own based on
   9583 	 * signals from the PHY.
   9584 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9585 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9586 	 */
   9587 	sc->sc_ctrl |= CTRL_SLU;
   9588 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9589 
   9590 	/* Initialize our media structures and probe the GMII. */
   9591 	mii->mii_ifp = ifp;
   9592 
   9593 	mii->mii_statchg = wm_gmii_statchg;
   9594 
    9595 	/* Move PHY control from SMBus to PCIe */
   9596 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9597 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9598 		wm_smbustopci(sc);
   9599 
   9600 	wm_gmii_reset(sc);
   9601 
   9602 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9603 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9604 	    wm_gmii_mediastatus);
   9605 
   9606 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9607 	    || (sc->sc_type == WM_T_82580)
   9608 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9609 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9610 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9611 			/* Attach only one port */
   9612 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9613 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9614 		} else {
   9615 			int i, id;
   9616 			uint32_t ctrl_ext;
   9617 
   9618 			id = wm_get_phy_id_82575(sc);
   9619 			if (id != -1) {
   9620 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9621 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9622 			}
   9623 			if ((id == -1)
   9624 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9625 				/* Power on the SGMII PHY if it is disabled */
   9626 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9627 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9628 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9629 				CSR_WRITE_FLUSH(sc);
   9630 				delay(300*1000); /* XXX too long */
   9631 
    9632 				/* Try PHY addresses 1 through 7 */
   9633 				for (i = 1; i < 8; i++)
   9634 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9635 					    0xffffffff, i, MII_OFFSET_ANY,
   9636 					    MIIF_DOPAUSE);
   9637 
    9638 				/* Restore the previous SFP cage power state */
   9639 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9640 			}
   9641 		}
   9642 	} else {
   9643 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9644 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9645 	}
   9646 
   9647 	/*
   9648 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9649 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9650 	 */
   9651 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9652 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9653 		wm_set_mdio_slow_mode_hv(sc);
   9654 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9655 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9656 	}
   9657 
   9658 	/*
   9659 	 * (For ICH8 variants)
   9660 	 * If PHY detection failed, use BM's r/w function and retry.
   9661 	 */
   9662 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9663 		/* if failed, retry with *_bm_* */
   9664 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9665 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9666 		    sc->sc_phytype);
   9667 		sc->sc_phytype = WMPHY_BM;
   9668 		mii->mii_readreg = wm_gmii_bm_readreg;
   9669 		mii->mii_writereg = wm_gmii_bm_writereg;
   9670 
   9671 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9672 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9673 	}
   9674 
   9675 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9676 		/* No PHY was found */
   9677 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9678 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9679 		sc->sc_phytype = WMPHY_NONE;
   9680 	} else {
   9681 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9682 
   9683 		/*
    9684 		 * A PHY was found. Check the PHY type again with a second
    9685 		 * call of wm_gmii_setup_phytype().
   9686 		 */
   9687 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9688 		    child->mii_mpd_model);
   9689 
   9690 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9691 	}
   9692 }
   9693 
   9694 /*
   9695  * wm_gmii_mediachange:	[ifmedia interface function]
   9696  *
   9697  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9698  */
   9699 static int
   9700 wm_gmii_mediachange(struct ifnet *ifp)
   9701 {
   9702 	struct wm_softc *sc = ifp->if_softc;
   9703 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9704 	int rc;
   9705 
   9706 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9707 		device_xname(sc->sc_dev), __func__));
   9708 	if ((ifp->if_flags & IFF_UP) == 0)
   9709 		return 0;
   9710 
   9711 	/* Disable D0 LPLU. */
   9712 	wm_lplu_d0_disable(sc);
   9713 
   9714 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9715 	sc->sc_ctrl |= CTRL_SLU;
   9716 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9717 	    || (sc->sc_type > WM_T_82543)) {
   9718 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9719 	} else {
   9720 		sc->sc_ctrl &= ~CTRL_ASDE;
   9721 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9722 		if (ife->ifm_media & IFM_FDX)
   9723 			sc->sc_ctrl |= CTRL_FD;
   9724 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9725 		case IFM_10_T:
   9726 			sc->sc_ctrl |= CTRL_SPEED_10;
   9727 			break;
   9728 		case IFM_100_TX:
   9729 			sc->sc_ctrl |= CTRL_SPEED_100;
   9730 			break;
   9731 		case IFM_1000_T:
   9732 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9733 			break;
   9734 		default:
   9735 			panic("wm_gmii_mediachange: bad media 0x%x",
   9736 			    ife->ifm_media);
   9737 		}
   9738 	}
   9739 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9740 	CSR_WRITE_FLUSH(sc);
   9741 	if (sc->sc_type <= WM_T_82543)
   9742 		wm_gmii_reset(sc);
   9743 
   9744 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9745 		return 0;
   9746 	return rc;
   9747 }
   9748 
   9749 /*
   9750  * wm_gmii_mediastatus:	[ifmedia interface function]
   9751  *
   9752  *	Get the current interface media status on a 1000BASE-T device.
   9753  */
   9754 static void
   9755 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9756 {
   9757 	struct wm_softc *sc = ifp->if_softc;
   9758 
   9759 	ether_mediastatus(ifp, ifmr);
   9760 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9761 	    | sc->sc_flowflags;
   9762 }
   9763 
   9764 #define	MDI_IO		CTRL_SWDPIN(2)
   9765 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9766 #define	MDI_CLK		CTRL_SWDPIN(3)
   9767 
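/*
 * The two helpers below bit-bang IEEE 802.3 clause 22 MDIO frames over
 * the software-definable pins.  As a sketch, the read frame built by
 * wm_gmii_i82543_readreg() is a 32-bit preamble of ones followed by 14
 * more bits:
 *
 *	<ST=01> <OP=10 (read)> <PHYAD[4:0]> <REGAD[4:0]>
 *
 * after which wm_i82543_mii_recvbits() clocks through the turnaround
 * and shifts in the 16 data bits.
 */
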
   9768 static void
   9769 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9770 {
   9771 	uint32_t i, v;
   9772 
   9773 	v = CSR_READ(sc, WMREG_CTRL);
   9774 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9775 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9776 
   9777 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9778 		if (data & i)
   9779 			v |= MDI_IO;
   9780 		else
   9781 			v &= ~MDI_IO;
   9782 		CSR_WRITE(sc, WMREG_CTRL, v);
   9783 		CSR_WRITE_FLUSH(sc);
   9784 		delay(10);
   9785 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9786 		CSR_WRITE_FLUSH(sc);
   9787 		delay(10);
   9788 		CSR_WRITE(sc, WMREG_CTRL, v);
   9789 		CSR_WRITE_FLUSH(sc);
   9790 		delay(10);
   9791 	}
   9792 }
   9793 
   9794 static uint32_t
   9795 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9796 {
   9797 	uint32_t v, i, data = 0;
   9798 
   9799 	v = CSR_READ(sc, WMREG_CTRL);
   9800 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9801 	v |= CTRL_SWDPIO(3);
   9802 
   9803 	CSR_WRITE(sc, WMREG_CTRL, v);
   9804 	CSR_WRITE_FLUSH(sc);
   9805 	delay(10);
   9806 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9807 	CSR_WRITE_FLUSH(sc);
   9808 	delay(10);
   9809 	CSR_WRITE(sc, WMREG_CTRL, v);
   9810 	CSR_WRITE_FLUSH(sc);
   9811 	delay(10);
   9812 
   9813 	for (i = 0; i < 16; i++) {
   9814 		data <<= 1;
   9815 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9816 		CSR_WRITE_FLUSH(sc);
   9817 		delay(10);
   9818 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9819 			data |= 1;
   9820 		CSR_WRITE(sc, WMREG_CTRL, v);
   9821 		CSR_WRITE_FLUSH(sc);
   9822 		delay(10);
   9823 	}
   9824 
   9825 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9826 	CSR_WRITE_FLUSH(sc);
   9827 	delay(10);
   9828 	CSR_WRITE(sc, WMREG_CTRL, v);
   9829 	CSR_WRITE_FLUSH(sc);
   9830 	delay(10);
   9831 
   9832 	return data;
   9833 }
   9834 
   9835 #undef MDI_IO
   9836 #undef MDI_DIR
   9837 #undef MDI_CLK
   9838 
   9839 /*
   9840  * wm_gmii_i82543_readreg:	[mii interface function]
   9841  *
   9842  *	Read a PHY register on the GMII (i82543 version).
   9843  */
   9844 static int
   9845 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9846 {
   9847 	struct wm_softc *sc = device_private(dev);
   9848 	int rv;
   9849 
   9850 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9851 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9852 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9853 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9854 
   9855 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9856 	    device_xname(dev), phy, reg, rv));
   9857 
   9858 	return rv;
   9859 }
   9860 
   9861 /*
   9862  * wm_gmii_i82543_writereg:	[mii interface function]
   9863  *
   9864  *	Write a PHY register on the GMII (i82543 version).
   9865  */
   9866 static void
   9867 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9868 {
   9869 	struct wm_softc *sc = device_private(dev);
   9870 
   9871 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9872 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9873 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9874 	    (MII_COMMAND_START << 30), 32);
   9875 }
   9876 
   9877 /*
   9878  * wm_gmii_mdic_readreg:	[mii interface function]
   9879  *
   9880  *	Read a PHY register on the GMII.
   9881  */
   9882 static int
   9883 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9884 {
   9885 	struct wm_softc *sc = device_private(dev);
   9886 	uint32_t mdic = 0;
   9887 	int i, rv;
   9888 
   9889 	if (reg > MII_ADDRMASK) {
   9890 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9891 		    __func__, sc->sc_phytype, reg);
   9892 		reg &= MII_ADDRMASK;
   9893 	}
   9894 
   9895 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9896 	    MDIC_REGADD(reg));
   9897 
   9898 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9899 		mdic = CSR_READ(sc, WMREG_MDIC);
   9900 		if (mdic & MDIC_READY)
   9901 			break;
   9902 		delay(50);
   9903 	}
   9904 
   9905 	if ((mdic & MDIC_READY) == 0) {
   9906 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9907 		    device_xname(dev), phy, reg);
   9908 		rv = 0;
   9909 	} else if (mdic & MDIC_E) {
   9910 #if 0 /* This is normal if no PHY is present. */
   9911 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9912 		    device_xname(dev), phy, reg);
   9913 #endif
   9914 		rv = 0;
   9915 	} else {
   9916 		rv = MDIC_DATA(mdic);
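		/*
		 * All-ones data most likely means that no PHY answered;
		 * treat it as a failed read.
		 */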
   9917 		if (rv == 0xffff)
   9918 			rv = 0;
   9919 	}
   9920 
   9921 	return rv;
   9922 }
   9923 
   9924 /*
   9925  * wm_gmii_mdic_writereg:	[mii interface function]
   9926  *
   9927  *	Write a PHY register on the GMII.
   9928  */
   9929 static void
   9930 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9931 {
   9932 	struct wm_softc *sc = device_private(dev);
   9933 	uint32_t mdic = 0;
   9934 	int i;
   9935 
   9936 	if (reg > MII_ADDRMASK) {
   9937 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9938 		    __func__, sc->sc_phytype, reg);
   9939 		reg &= MII_ADDRMASK;
   9940 	}
   9941 
   9942 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9943 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9944 
   9945 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9946 		mdic = CSR_READ(sc, WMREG_MDIC);
   9947 		if (mdic & MDIC_READY)
   9948 			break;
   9949 		delay(50);
   9950 	}
   9951 
   9952 	if ((mdic & MDIC_READY) == 0)
   9953 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9954 		    device_xname(dev), phy, reg);
   9955 	else if (mdic & MDIC_E)
   9956 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9957 		    device_xname(dev), phy, reg);
   9958 }
   9959 
   9960 /*
   9961  * wm_gmii_i82544_readreg:	[mii interface function]
   9962  *
   9963  *	Read a PHY register on the GMII.
   9964  */
   9965 static int
   9966 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9967 {
   9968 	struct wm_softc *sc = device_private(dev);
   9969 	int rv;
   9970 
   9971 	if (sc->phy.acquire(sc)) {
   9972 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9973 		return 0;
   9974 	}
   9975 
   9976 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9977 		switch (sc->sc_phytype) {
   9978 		case WMPHY_IGP:
   9979 		case WMPHY_IGP_2:
   9980 		case WMPHY_IGP_3:
   9981 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9982 			break;
   9983 		default:
   9984 #ifdef WM_DEBUG
   9985 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9986 			    __func__, sc->sc_phytype, reg);
   9987 #endif
   9988 			break;
   9989 		}
   9990 	}
   9991 
   9992 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9993 	sc->phy.release(sc);
   9994 
   9995 	return rv;
   9996 }
   9997 
   9998 /*
   9999  * wm_gmii_i82544_writereg:	[mii interface function]
   10000  *
   10001  *	Write a PHY register on the GMII.
   10002  */
   10003 static void
   10004 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10005 {
   10006 	struct wm_softc *sc = device_private(dev);
   10007 
   10008 	if (sc->phy.acquire(sc)) {
   10009 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10010 		return;
   10011 	}
   10012 
   10013 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10014 		switch (sc->sc_phytype) {
   10015 		case WMPHY_IGP:
   10016 		case WMPHY_IGP_2:
   10017 		case WMPHY_IGP_3:
   10018 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   10019 			break;
   10020 		default:
   10021 #ifdef WM_DEBUG
    10022 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10023 			    __func__, sc->sc_phytype, reg);
   10024 #endif
   10025 			break;
   10026 		}
   10027 	}
   10028 
   10029 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10030 	sc->phy.release(sc);
   10031 }
   10032 
   10033 /*
   10034  * wm_gmii_i80003_readreg:	[mii interface function]
   10035  *
    10036  *	Read a PHY register on the kumeran bus (80003).
    10037  * This could be handled by the PHY layer if we didn't have to lock the
    10038  * resource ...
   10039  */
   10040 static int
   10041 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10042 {
   10043 	struct wm_softc *sc = device_private(dev);
   10044 	int page_select, temp;
   10045 	int rv;
   10046 
   10047 	if (phy != 1) /* only one PHY on kumeran bus */
   10048 		return 0;
   10049 
   10050 	if (sc->phy.acquire(sc)) {
   10051 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10052 		return 0;
   10053 	}
   10054 
   10055 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10056 		page_select = GG82563_PHY_PAGE_SELECT;
   10057 	else {
   10058 		/*
   10059 		 * Use Alternative Page Select register to access registers
   10060 		 * 30 and 31.
   10061 		 */
   10062 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10063 	}
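	/* The GG82563 page number is encoded in the upper bits of "reg". */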
   10064 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10065 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10066 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10067 		/*
    10068 		 * Wait an extra 200us to work around a bug in the ready
    10069 		 * bit of the MDIC register.
   10070 		 */
   10071 		delay(200);
   10072 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10073 			device_printf(dev, "%s failed\n", __func__);
   10074 			rv = 0; /* XXX */
   10075 			goto out;
   10076 		}
   10077 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10078 		delay(200);
   10079 	} else
   10080 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10081 
   10082 out:
   10083 	sc->phy.release(sc);
   10084 	return rv;
   10085 }
   10086 
   10087 /*
   10088  * wm_gmii_i80003_writereg:	[mii interface function]
   10089  *
    10090  *	Write a PHY register on the kumeran bus (80003).
    10091  * This could be handled by the PHY layer if we didn't have to lock the
    10092  * resource ...
   10093  */
   10094 static void
   10095 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10096 {
   10097 	struct wm_softc *sc = device_private(dev);
   10098 	int page_select, temp;
   10099 
   10100 	if (phy != 1) /* only one PHY on kumeran bus */
   10101 		return;
   10102 
   10103 	if (sc->phy.acquire(sc)) {
   10104 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10105 		return;
   10106 	}
   10107 
   10108 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10109 		page_select = GG82563_PHY_PAGE_SELECT;
   10110 	else {
   10111 		/*
   10112 		 * Use Alternative Page Select register to access registers
   10113 		 * 30 and 31.
   10114 		 */
   10115 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10116 	}
   10117 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10118 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10119 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10120 		/*
    10121 		 * Wait an extra 200us to work around a bug in the ready
    10122 		 * bit of the MDIC register.
   10123 		 */
   10124 		delay(200);
   10125 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10126 			device_printf(dev, "%s failed\n", __func__);
   10127 			goto out;
   10128 		}
   10129 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10130 		delay(200);
   10131 	} else
   10132 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10133 
   10134 out:
   10135 	sc->phy.release(sc);
   10136 }
   10137 
   10138 /*
   10139  * wm_gmii_bm_readreg:	[mii interface function]
   10140  *
    10141  *	Read a PHY register on the BM PHY.
    10142  * This could be handled by the PHY layer if we didn't have to lock the
    10143  * resource ...
   10144  */
   10145 static int
   10146 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10147 {
   10148 	struct wm_softc *sc = device_private(dev);
   10149 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10150 	uint16_t val;
   10151 	int rv;
   10152 
   10153 	if (sc->phy.acquire(sc)) {
   10154 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10155 		return 0;
   10156 	}
   10157 
   10158 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10159 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10160 		    || (reg == 31)) ? 1 : phy;
   10161 	/* Page 800 works differently than the rest so it has its own func */
   10162 	if (page == BM_WUC_PAGE) {
   10163 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10164 		rv = val;
   10165 		goto release;
   10166 	}
   10167 
   10168 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10169 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10170 		    && (sc->sc_type != WM_T_82583))
   10171 			wm_gmii_mdic_writereg(dev, phy,
   10172 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10173 		else
   10174 			wm_gmii_mdic_writereg(dev, phy,
   10175 			    BME1000_PHY_PAGE_SELECT, page);
   10176 	}
   10177 
   10178 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10179 
   10180 release:
   10181 	sc->phy.release(sc);
   10182 	return rv;
   10183 }
   10184 
   10185 /*
   10186  * wm_gmii_bm_writereg:	[mii interface function]
   10187  *
    10188  *	Write a PHY register on the BM PHY.
    10189  * This could be handled by the PHY layer if we didn't have to lock the
    10190  * resource ...
   10191  */
   10192 static void
   10193 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10194 {
   10195 	struct wm_softc *sc = device_private(dev);
   10196 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10197 
   10198 	if (sc->phy.acquire(sc)) {
   10199 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10200 		return;
   10201 	}
   10202 
   10203 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10204 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10205 		    || (reg == 31)) ? 1 : phy;
   10206 	/* Page 800 works differently than the rest so it has its own func */
   10207 	if (page == BM_WUC_PAGE) {
   10208 		uint16_t tmp;
   10209 
   10210 		tmp = val;
   10211 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10212 		goto release;
   10213 	}
   10214 
   10215 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10216 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10217 		    && (sc->sc_type != WM_T_82583))
   10218 			wm_gmii_mdic_writereg(dev, phy,
   10219 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10220 		else
   10221 			wm_gmii_mdic_writereg(dev, phy,
   10222 			    BME1000_PHY_PAGE_SELECT, page);
   10223 	}
   10224 
   10225 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10226 
   10227 release:
   10228 	sc->phy.release(sc);
   10229 }
   10230 
   10231 static void
    10232 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10233 {
   10234 	struct wm_softc *sc = device_private(dev);
   10235 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10236 	uint16_t wuce, reg;
   10237 
   10238 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10239 		device_xname(dev), __func__));
   10240 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10241 	if (sc->sc_type == WM_T_PCH) {
    10242 		/* XXX The e1000 driver does nothing here... why? */
   10243 	}
   10244 
   10245 	/*
   10246 	 * 1) Enable PHY wakeup register first.
   10247 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10248 	 */
   10249 
   10250 	/* Set page 769 */
   10251 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10252 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10253 
   10254 	/* Read WUCE and save it */
   10255 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10256 
   10257 	reg = wuce | BM_WUC_ENABLE_BIT;
   10258 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10259 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10260 
   10261 	/* Select page 800 */
   10262 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10263 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10264 
   10265 	/*
   10266 	 * 2) Access PHY wakeup register.
   10267 	 * See e1000_access_phy_wakeup_reg_bm.
   10268 	 */
   10269 
   10270 	/* Write page 800 */
   10271 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10272 
   10273 	if (rd)
   10274 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10275 	else
   10276 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10277 
   10278 	/*
   10279 	 * 3) Disable PHY wakeup register.
   10280 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10281 	 */
   10282 	/* Set page 769 */
   10283 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10284 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10285 
   10286 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10287 }
   10288 
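/*
 * Usage sketch for the helper above, mirroring wm_gmii_bm_readreg():
 * registers on the wakeup page (800) are read with rd = 1, and written
 * with rd = 0 and *val preloaded:
 *
 *	uint16_t val;
 *
 *	if ((reg >> BME1000_PAGE_SHIFT) == BM_WUC_PAGE)
 *		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
 */
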
   10289 /*
   10290  * wm_gmii_hv_readreg:	[mii interface function]
   10291  *
    10292  *	Read a PHY register on the HV PHY (PCH family).
    10293  * This could be handled by the PHY layer if we didn't have to lock the
    10294  * resource ...
   10295  */
   10296 static int
   10297 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10298 {
   10299 	struct wm_softc *sc = device_private(dev);
   10300 	int rv;
   10301 
   10302 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10303 		device_xname(dev), __func__));
   10304 	if (sc->phy.acquire(sc)) {
   10305 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10306 		return 0;
   10307 	}
   10308 
   10309 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10310 	sc->phy.release(sc);
   10311 	return rv;
   10312 }
   10313 
   10314 static int
   10315 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10316 {
   10317 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10318 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10319 	uint16_t val;
   10320 	int rv;
   10321 
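	/*
	 * Registers on page 768 (HV_INTC_FC_PAGE_START) and above are
	 * only reachable at PHY address 1.
	 */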
   10322 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10323 
   10324 	/* Page 800 works differently than the rest so it has its own func */
   10325 	if (page == BM_WUC_PAGE) {
   10326 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10327 		return val;
   10328 	}
   10329 
   10330 	/*
    10331 	 * Pages 1 through 767 work differently from the rest and are
    10332 	 * not handled here.
   10333 	 */
   10334 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10335 		printf("gmii_hv_readreg!!!\n");
   10336 		return 0;
   10337 	}
   10338 
   10339 	/*
   10340 	 * XXX I21[789] documents say that the SMBus Address register is at
   10341 	 * PHY address 01, Page 0 (not 768), Register 26.
   10342 	 */
   10343 	if (page == HV_INTC_FC_PAGE_START)
   10344 		page = 0;
   10345 
   10346 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10347 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10348 		    page << BME1000_PAGE_SHIFT);
   10349 	}
   10350 
   10351 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10352 	return rv;
   10353 }
   10354 
   10355 /*
   10356  * wm_gmii_hv_writereg:	[mii interface function]
   10357  *
    10358  *	Write a PHY register on the HV PHY (PCH family).
    10359  * This could be handled by the PHY layer if we didn't have to lock the
    10360  * resource ...
   10361  */
   10362 static void
   10363 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10364 {
   10365 	struct wm_softc *sc = device_private(dev);
   10366 
   10367 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10368 		device_xname(dev), __func__));
   10369 
   10370 	if (sc->phy.acquire(sc)) {
   10371 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10372 		return;
   10373 	}
   10374 
   10375 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10376 	sc->phy.release(sc);
   10377 }
   10378 
   10379 static void
   10380 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10381 {
   10382 	struct wm_softc *sc = device_private(dev);
   10383 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10384 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10385 
   10386 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10387 
   10388 	/* Page 800 works differently than the rest so it has its own func */
   10389 	if (page == BM_WUC_PAGE) {
   10390 		uint16_t tmp;
   10391 
   10392 		tmp = val;
   10393 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10394 		return;
   10395 	}
   10396 
   10397 	/*
    10398 	 * Pages 1 through 767 work differently from the rest and are
    10399 	 * not handled here.
   10400 	 */
   10401 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10402 		printf("gmii_hv_writereg!!!\n");
   10403 		return;
   10404 	}
   10405 
   10406 	{
   10407 		/*
   10408 		 * XXX I21[789] documents say that the SMBus Address register
   10409 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10410 		 */
   10411 		if (page == HV_INTC_FC_PAGE_START)
   10412 			page = 0;
   10413 
   10414 		/*
   10415 		 * XXX Workaround MDIO accesses being disabled after entering
   10416 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10417 		 * register is set)
   10418 		 */
   10419 		if (sc->sc_phytype == WMPHY_82578) {
   10420 			struct mii_softc *child;
   10421 
   10422 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10423 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10424 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10425 			    && ((val & (1 << 11)) != 0)) {
   10426 				printf("XXX need workaround\n");
   10427 			}
   10428 		}
   10429 
   10430 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10431 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10432 			    page << BME1000_PAGE_SHIFT);
   10433 		}
   10434 	}
   10435 
   10436 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10437 }
   10438 
   10439 /*
   10440  * wm_gmii_82580_readreg:	[mii interface function]
   10441  *
   10442  *	Read a PHY register on the 82580 and I350.
   10443  * This could be handled by the PHY layer if we didn't have to lock the
    10444  * resource ...
   10445  */
   10446 static int
   10447 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10448 {
   10449 	struct wm_softc *sc = device_private(dev);
   10450 	int rv;
   10451 
   10452 	if (sc->phy.acquire(sc) != 0) {
   10453 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10454 		return 0;
   10455 	}
   10456 
   10457 #ifdef DIAGNOSTIC
   10458 	if (reg > MII_ADDRMASK) {
   10459 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10460 		    __func__, sc->sc_phytype, reg);
   10461 		reg &= MII_ADDRMASK;
   10462 	}
   10463 #endif
   10464 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10465 
   10466 	sc->phy.release(sc);
   10467 	return rv;
   10468 }
   10469 
   10470 /*
   10471  * wm_gmii_82580_writereg:	[mii interface function]
   10472  *
   10473  *	Write a PHY register on the 82580 and I350.
   10474  * This could be handled by the PHY layer if we didn't have to lock the
    10475  * resource ...
   10476  */
   10477 static void
   10478 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10479 {
   10480 	struct wm_softc *sc = device_private(dev);
   10481 
   10482 	if (sc->phy.acquire(sc) != 0) {
   10483 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10484 		return;
   10485 	}
   10486 
   10487 #ifdef DIAGNOSTIC
   10488 	if (reg > MII_ADDRMASK) {
   10489 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10490 		    __func__, sc->sc_phytype, reg);
   10491 		reg &= MII_ADDRMASK;
   10492 	}
   10493 #endif
   10494 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10495 
   10496 	sc->phy.release(sc);
   10497 }
   10498 
   10499 /*
   10500  * wm_gmii_gs40g_readreg:	[mii interface function]
   10501  *
    10502  *	Read a PHY register on the I210 and I211.
    10503  * This could be handled by the PHY layer if we didn't have to lock the
    10504  * resource ...
   10505  */
   10506 static int
   10507 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10508 {
   10509 	struct wm_softc *sc = device_private(dev);
   10510 	int page, offset;
   10511 	int rv;
   10512 
   10513 	/* Acquire semaphore */
   10514 	if (sc->phy.acquire(sc)) {
   10515 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10516 		return 0;
   10517 	}
   10518 
   10519 	/* Page select */
   10520 	page = reg >> GS40G_PAGE_SHIFT;
   10521 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10522 
   10523 	/* Read reg */
   10524 	offset = reg & GS40G_OFFSET_MASK;
   10525 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10526 
   10527 	sc->phy.release(sc);
   10528 	return rv;
   10529 }
   10530 
   10531 /*
   10532  * wm_gmii_gs40g_writereg:	[mii interface function]
   10533  *
   10534  *	Write a PHY register on the I210 and I211.
   10535  * This could be handled by the PHY layer if we didn't have to lock the
    10536  * resource ...
   10537  */
   10538 static void
   10539 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10540 {
   10541 	struct wm_softc *sc = device_private(dev);
   10542 	int page, offset;
   10543 
   10544 	/* Acquire semaphore */
   10545 	if (sc->phy.acquire(sc)) {
   10546 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10547 		return;
   10548 	}
   10549 
   10550 	/* Page select */
   10551 	page = reg >> GS40G_PAGE_SHIFT;
   10552 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10553 
   10554 	/* Write reg */
   10555 	offset = reg & GS40G_OFFSET_MASK;
   10556 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10557 
   10558 	/* Release semaphore */
   10559 	sc->phy.release(sc);
   10560 }
   10561 
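/*
 * In the two GS40G accessors above, "reg" packs the page number above
 * GS40G_PAGE_SHIFT and the in-page offset under GS40G_OFFSET_MASK, so
 * (hypothetically) page 2, offset 21 would be passed as
 * (2 << GS40G_PAGE_SHIFT) | 21.
 */
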
   10562 /*
   10563  * wm_gmii_statchg:	[mii interface function]
   10564  *
   10565  *	Callback from MII layer when media changes.
   10566  */
   10567 static void
   10568 wm_gmii_statchg(struct ifnet *ifp)
   10569 {
   10570 	struct wm_softc *sc = ifp->if_softc;
   10571 	struct mii_data *mii = &sc->sc_mii;
   10572 
   10573 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10574 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10575 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10576 
   10577 	/*
   10578 	 * Get flow control negotiation result.
   10579 	 */
   10580 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10581 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10582 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10583 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10584 	}
   10585 
   10586 	if (sc->sc_flowflags & IFM_FLOW) {
   10587 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10588 			sc->sc_ctrl |= CTRL_TFCE;
   10589 			sc->sc_fcrtl |= FCRTL_XONE;
   10590 		}
   10591 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10592 			sc->sc_ctrl |= CTRL_RFCE;
   10593 	}
   10594 
   10595 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10596 		DPRINTF(WM_DEBUG_LINK,
   10597 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10598 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10599 	} else {
   10600 		DPRINTF(WM_DEBUG_LINK,
   10601 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10602 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10603 	}
   10604 
   10605 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10606 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10607 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10608 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10609 	if (sc->sc_type == WM_T_80003) {
   10610 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10611 		case IFM_1000_T:
   10612 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10613 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10614 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10615 			break;
   10616 		default:
   10617 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10618 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10619 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10620 			break;
   10621 		}
   10622 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10623 	}
   10624 }
   10625 
   10626 /* kumeran related (80003, ICH* and PCH*) */
   10627 
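/*
 * Kumeran registers are reached through the single KUMCTRLSTA CSR: the
 * register offset goes into the KUMCTRLSTA_OFFSET field, KUMCTRLSTA_REN
 * requests a read, and the KUMCTRLSTA_MASK bits carry the data, as the
 * two _locked helpers below show.
 */
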
   10628 /*
   10629  * wm_kmrn_readreg:
   10630  *
   10631  *	Read a kumeran register
   10632  */
   10633 static int
   10634 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10635 {
   10636 	int rv;
   10637 
   10638 	if (sc->sc_type == WM_T_80003)
   10639 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10640 	else
   10641 		rv = sc->phy.acquire(sc);
   10642 	if (rv != 0) {
   10643 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10644 		    __func__);
   10645 		return rv;
   10646 	}
   10647 
   10648 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10649 
   10650 	if (sc->sc_type == WM_T_80003)
   10651 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10652 	else
   10653 		sc->phy.release(sc);
   10654 
   10655 	return rv;
   10656 }
   10657 
   10658 static int
   10659 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10660 {
   10661 
   10662 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10663 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10664 	    KUMCTRLSTA_REN);
   10665 	CSR_WRITE_FLUSH(sc);
   10666 	delay(2);
   10667 
   10668 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10669 
   10670 	return 0;
   10671 }
   10672 
   10673 /*
   10674  * wm_kmrn_writereg:
   10675  *
   10676  *	Write a kumeran register
   10677  */
   10678 static int
   10679 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10680 {
   10681 	int rv;
   10682 
   10683 	if (sc->sc_type == WM_T_80003)
   10684 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10685 	else
   10686 		rv = sc->phy.acquire(sc);
   10687 	if (rv != 0) {
   10688 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10689 		    __func__);
   10690 		return rv;
   10691 	}
   10692 
   10693 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10694 
   10695 	if (sc->sc_type == WM_T_80003)
   10696 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10697 	else
   10698 		sc->phy.release(sc);
   10699 
   10700 	return rv;
   10701 }
   10702 
   10703 static int
   10704 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10705 {
   10706 
   10707 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10708 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10709 
   10710 	return 0;
   10711 }
   10712 
   10713 /* SGMII related */
   10714 
   10715 /*
   10716  * wm_sgmii_uses_mdio
   10717  *
   10718  * Check whether the transaction is to the internal PHY or the external
   10719  * MDIO interface. Return true if it's MDIO.
   10720  */
   10721 static bool
   10722 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10723 {
   10724 	uint32_t reg;
   10725 	bool ismdio = false;
   10726 
   10727 	switch (sc->sc_type) {
   10728 	case WM_T_82575:
   10729 	case WM_T_82576:
   10730 		reg = CSR_READ(sc, WMREG_MDIC);
   10731 		ismdio = ((reg & MDIC_DEST) != 0);
   10732 		break;
   10733 	case WM_T_82580:
   10734 	case WM_T_I350:
   10735 	case WM_T_I354:
   10736 	case WM_T_I210:
   10737 	case WM_T_I211:
   10738 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10739 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10740 		break;
   10741 	default:
   10742 		break;
   10743 	}
   10744 
   10745 	return ismdio;
   10746 }
   10747 
   10748 /*
   10749  * wm_sgmii_readreg:	[mii interface function]
   10750  *
   10751  *	Read a PHY register on the SGMII
   10752  * This could be handled by the PHY layer if we didn't have to lock the
    10753  * resource ...
   10754  */
   10755 static int
   10756 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10757 {
   10758 	struct wm_softc *sc = device_private(dev);
   10759 	uint32_t i2ccmd;
   10760 	int i, rv;
   10761 
   10762 	if (sc->phy.acquire(sc)) {
   10763 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10764 		return 0;
   10765 	}
   10766 
   10767 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10768 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10769 	    | I2CCMD_OPCODE_READ;
   10770 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10771 
   10772 	/* Poll the ready bit */
   10773 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10774 		delay(50);
   10775 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10776 		if (i2ccmd & I2CCMD_READY)
   10777 			break;
   10778 	}
   10779 	if ((i2ccmd & I2CCMD_READY) == 0)
   10780 		device_printf(dev, "I2CCMD Read did not complete\n");
   10781 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10782 		device_printf(dev, "I2CCMD Error bit set\n");
   10783 
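	/* The I2C interface returns the 16-bit value byte-swapped; undo it. */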
   10784 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10785 
   10786 	sc->phy.release(sc);
   10787 	return rv;
   10788 }
   10789 
   10790 /*
   10791  * wm_sgmii_writereg:	[mii interface function]
   10792  *
   10793  *	Write a PHY register on the SGMII.
   10794  * This could be handled by the PHY layer if we didn't have to lock the
    10795  * resource ...
   10796  */
   10797 static void
   10798 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10799 {
   10800 	struct wm_softc *sc = device_private(dev);
   10801 	uint32_t i2ccmd;
   10802 	int i;
   10803 	int val_swapped;
   10804 
   10805 	if (sc->phy.acquire(sc) != 0) {
   10806 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10807 		return;
   10808 	}
   10809 	/* Swap the data bytes for the I2C interface */
   10810 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10811 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10812 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10813 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10814 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10815 
   10816 	/* Poll the ready bit */
   10817 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10818 		delay(50);
   10819 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10820 		if (i2ccmd & I2CCMD_READY)
   10821 			break;
   10822 	}
   10823 	if ((i2ccmd & I2CCMD_READY) == 0)
   10824 		device_printf(dev, "I2CCMD Write did not complete\n");
   10825 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10826 		device_printf(dev, "I2CCMD Error bit set\n");
   10827 
   10828 	sc->phy.release(sc);
   10829 }
   10830 
   10831 /* TBI related */
   10832 
   10833 /*
   10834  * wm_tbi_mediainit:
   10835  *
   10836  *	Initialize media for use on 1000BASE-X devices.
   10837  */
   10838 static void
   10839 wm_tbi_mediainit(struct wm_softc *sc)
   10840 {
   10841 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10842 	const char *sep = "";
   10843 
   10844 	if (sc->sc_type < WM_T_82543)
   10845 		sc->sc_tipg = TIPG_WM_DFLT;
   10846 	else
   10847 		sc->sc_tipg = TIPG_LG_DFLT;
   10848 
   10849 	sc->sc_tbi_serdes_anegticks = 5;
   10850 
   10851 	/* Initialize our media structures */
   10852 	sc->sc_mii.mii_ifp = ifp;
   10853 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10854 
   10855 	if ((sc->sc_type >= WM_T_82575)
   10856 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10857 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10858 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10859 	else
   10860 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10861 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10862 
   10863 	/*
   10864 	 * SWD Pins:
   10865 	 *
   10866 	 *	0 = Link LED (output)
   10867 	 *	1 = Loss Of Signal (input)
   10868 	 */
   10869 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10870 
   10871 	/* XXX Perhaps this is only for TBI */
   10872 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10873 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10874 
   10875 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10876 		sc->sc_ctrl &= ~CTRL_LRST;
   10877 
   10878 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10879 
   10880 #define	ADD(ss, mm, dd)							\
   10881 do {									\
   10882 	aprint_normal("%s%s", sep, ss);					\
   10883 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10884 	sep = ", ";							\
   10885 } while (/*CONSTCOND*/0)
   10886 
   10887 	aprint_normal_dev(sc->sc_dev, "");
   10888 
   10889 	if (sc->sc_type == WM_T_I354) {
   10890 		uint32_t status;
   10891 
   10892 		status = CSR_READ(sc, WMREG_STATUS);
   10893 		if (((status & STATUS_2P5_SKU) != 0)
   10894 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10895 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   10896 		} else
   10897 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   10898 	} else if (sc->sc_type == WM_T_82545) {
   10899 		/* Only 82545 is LX (XXX except SFP) */
   10900 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10901 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10902 	} else {
   10903 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10904 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10905 	}
   10906 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10907 	aprint_normal("\n");
   10908 
   10909 #undef ADD
   10910 
   10911 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10912 }
   10913 
   10914 /*
   10915  * wm_tbi_mediachange:	[ifmedia interface function]
   10916  *
   10917  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10918  */
   10919 static int
   10920 wm_tbi_mediachange(struct ifnet *ifp)
   10921 {
   10922 	struct wm_softc *sc = ifp->if_softc;
   10923 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10924 	uint32_t status;
   10925 	int i;
   10926 
   10927 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10928 		/* XXX need some work for >= 82571 and < 82575 */
   10929 		if (sc->sc_type < WM_T_82575)
   10930 			return 0;
   10931 	}
   10932 
   10933 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10934 	    || (sc->sc_type >= WM_T_82575))
   10935 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10936 
   10937 	sc->sc_ctrl &= ~CTRL_LRST;
   10938 	sc->sc_txcw = TXCW_ANE;
   10939 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10940 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10941 	else if (ife->ifm_media & IFM_FDX)
   10942 		sc->sc_txcw |= TXCW_FD;
   10943 	else
   10944 		sc->sc_txcw |= TXCW_HD;
   10945 
   10946 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10947 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10948 
   10949 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10950 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10951 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10952 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10953 	CSR_WRITE_FLUSH(sc);
   10954 	delay(1000);
   10955 
   10956 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10957 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10958 
   10959 	/*
    10960 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    10961 	 * if the optics detect a signal, 0 if they don't.
   10962 	 */
   10963 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10964 		/* Have signal; wait for the link to come up. */
   10965 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10966 			delay(10000);
   10967 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10968 				break;
   10969 		}
   10970 
   10971 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10972 			    device_xname(sc->sc_dev),i));
   10973 
   10974 		status = CSR_READ(sc, WMREG_STATUS);
   10975 		DPRINTF(WM_DEBUG_LINK,
   10976 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10977 			device_xname(sc->sc_dev),status, STATUS_LU));
   10978 		if (status & STATUS_LU) {
   10979 			/* Link is up. */
   10980 			DPRINTF(WM_DEBUG_LINK,
   10981 			    ("%s: LINK: set media -> link up %s\n",
   10982 			    device_xname(sc->sc_dev),
   10983 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10984 
   10985 			/*
    10986 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    10987 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   10988 			 */
   10989 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10990 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10991 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10992 			if (status & STATUS_FD)
   10993 				sc->sc_tctl |=
   10994 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10995 			else
   10996 				sc->sc_tctl |=
   10997 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10998 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10999 				sc->sc_fcrtl |= FCRTL_XONE;
   11000 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11001 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11002 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   11003 				      sc->sc_fcrtl);
   11004 			sc->sc_tbi_linkup = 1;
   11005 		} else {
   11006 			if (i == WM_LINKUP_TIMEOUT)
   11007 				wm_check_for_link(sc);
   11008 			/* Link is down. */
   11009 			DPRINTF(WM_DEBUG_LINK,
   11010 			    ("%s: LINK: set media -> link down\n",
   11011 			    device_xname(sc->sc_dev)));
   11012 			sc->sc_tbi_linkup = 0;
   11013 		}
   11014 	} else {
   11015 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11016 		    device_xname(sc->sc_dev)));
   11017 		sc->sc_tbi_linkup = 0;
   11018 	}
   11019 
   11020 	wm_tbi_serdes_set_linkled(sc);
   11021 
   11022 	return 0;
   11023 }
   11024 
   11025 /*
   11026  * wm_tbi_mediastatus:	[ifmedia interface function]
   11027  *
   11028  *	Get the current interface media status on a 1000BASE-X device.
   11029  */
   11030 static void
   11031 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11032 {
   11033 	struct wm_softc *sc = ifp->if_softc;
   11034 	uint32_t ctrl, status;
   11035 
   11036 	ifmr->ifm_status = IFM_AVALID;
   11037 	ifmr->ifm_active = IFM_ETHER;
   11038 
   11039 	status = CSR_READ(sc, WMREG_STATUS);
   11040 	if ((status & STATUS_LU) == 0) {
   11041 		ifmr->ifm_active |= IFM_NONE;
   11042 		return;
   11043 	}
   11044 
   11045 	ifmr->ifm_status |= IFM_ACTIVE;
   11046 	/* Only 82545 is LX */
   11047 	if (sc->sc_type == WM_T_82545)
   11048 		ifmr->ifm_active |= IFM_1000_LX;
   11049 	else
   11050 		ifmr->ifm_active |= IFM_1000_SX;
   11051 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11052 		ifmr->ifm_active |= IFM_FDX;
   11053 	else
   11054 		ifmr->ifm_active |= IFM_HDX;
   11055 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11056 	if (ctrl & CTRL_RFCE)
   11057 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11058 	if (ctrl & CTRL_TFCE)
   11059 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11060 }
   11061 
   11062 /* XXX TBI only */
   11063 static int
   11064 wm_check_for_link(struct wm_softc *sc)
   11065 {
   11066 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11067 	uint32_t rxcw;
   11068 	uint32_t ctrl;
   11069 	uint32_t status;
   11070 	uint32_t sig;
   11071 
   11072 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11073 		/* XXX need some work for >= 82571 */
   11074 		if (sc->sc_type >= WM_T_82571) {
   11075 			sc->sc_tbi_linkup = 1;
   11076 			return 0;
   11077 		}
   11078 	}
   11079 
   11080 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11081 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11082 	status = CSR_READ(sc, WMREG_STATUS);
   11083 
   11084 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11085 
   11086 	DPRINTF(WM_DEBUG_LINK,
   11087 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11088 		device_xname(sc->sc_dev), __func__,
   11089 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11090 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11091 
   11092 	/*
   11093 	 * SWDPIN   LU RXCW
   11094 	 *      0    0    0
   11095 	 *      0    0    1	(should not happen)
   11096 	 *      0    1    0	(should not happen)
   11097 	 *      0    1    1	(should not happen)
   11098 	 *      1    0    0	Disable autonego and force linkup
   11099 	 *      1    0    1	got /C/ but not linkup yet
   11100 	 *      1    1    0	(linkup)
   11101 	 *      1    1    1	If IFM_AUTO, back to autonego
   11102 	 *
   11103 	 */
   11104 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11105 	    && ((status & STATUS_LU) == 0)
   11106 	    && ((rxcw & RXCW_C) == 0)) {
   11107 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11108 			__func__));
   11109 		sc->sc_tbi_linkup = 0;
   11110 		/* Disable auto-negotiation in the TXCW register */
   11111 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11112 
   11113 		/*
   11114 		 * Force link-up and also force full-duplex.
   11115 		 *
    11116 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
    11117 		 * automatically, so update sc->sc_ctrl to match.
   11118 		 */
   11119 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11120 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11121 	} else if (((status & STATUS_LU) != 0)
   11122 	    && ((rxcw & RXCW_C) != 0)
   11123 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11124 		sc->sc_tbi_linkup = 1;
   11125 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11126 			__func__));
   11127 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11128 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11129 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11130 	    && ((rxcw & RXCW_C) != 0)) {
   11131 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11132 	} else {
   11133 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11134 			status));
   11135 	}
   11136 
   11137 	return 0;
   11138 }
   11139 
   11140 /*
   11141  * wm_tbi_tick:
   11142  *
   11143  *	Check the link on TBI devices.
   11144  *	This function acts as mii_tick().
   11145  */
   11146 static void
   11147 wm_tbi_tick(struct wm_softc *sc)
   11148 {
   11149 	struct mii_data *mii = &sc->sc_mii;
   11150 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11151 	uint32_t status;
   11152 
   11153 	KASSERT(WM_CORE_LOCKED(sc));
   11154 
   11155 	status = CSR_READ(sc, WMREG_STATUS);
   11156 
   11157 	/* XXX is this needed? */
   11158 	(void)CSR_READ(sc, WMREG_RXCW);
   11159 	(void)CSR_READ(sc, WMREG_CTRL);
   11160 
   11161 	/* set link status */
   11162 	if ((status & STATUS_LU) == 0) {
   11163 		DPRINTF(WM_DEBUG_LINK,
   11164 		    ("%s: LINK: checklink -> down\n",
   11165 			device_xname(sc->sc_dev)));
   11166 		sc->sc_tbi_linkup = 0;
   11167 	} else if (sc->sc_tbi_linkup == 0) {
   11168 		DPRINTF(WM_DEBUG_LINK,
   11169 		    ("%s: LINK: checklink -> up %s\n",
   11170 			device_xname(sc->sc_dev),
   11171 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11172 		sc->sc_tbi_linkup = 1;
   11173 		sc->sc_tbi_serdes_ticks = 0;
   11174 	}
   11175 
   11176 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11177 		goto setled;
   11178 
   11179 	if ((status & STATUS_LU) == 0) {
   11180 		sc->sc_tbi_linkup = 0;
   11181 		/* If the timer expired, retry autonegotiation */
   11182 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11183 		    && (++sc->sc_tbi_serdes_ticks
   11184 			>= sc->sc_tbi_serdes_anegticks)) {
   11185 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11186 			sc->sc_tbi_serdes_ticks = 0;
   11187 			/*
   11188 			 * Reset the link, and let autonegotiation do
   11189 			 * its thing
   11190 			 */
   11191 			sc->sc_ctrl |= CTRL_LRST;
   11192 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11193 			CSR_WRITE_FLUSH(sc);
   11194 			delay(1000);
   11195 			sc->sc_ctrl &= ~CTRL_LRST;
   11196 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11197 			CSR_WRITE_FLUSH(sc);
   11198 			delay(1000);
   11199 			CSR_WRITE(sc, WMREG_TXCW,
   11200 			    sc->sc_txcw & ~TXCW_ANE);
   11201 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11202 		}
   11203 	}
   11204 
   11205 setled:
   11206 	wm_tbi_serdes_set_linkled(sc);
   11207 }
   11208 
   11209 /* SERDES related */
   11210 static void
   11211 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11212 {
   11213 	uint32_t reg;
   11214 
   11215 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11216 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11217 		return;
   11218 
   11219 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11220 	reg |= PCS_CFG_PCS_EN;
   11221 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11222 
   11223 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11224 	reg &= ~CTRL_EXT_SWDPIN(3);
   11225 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11226 	CSR_WRITE_FLUSH(sc);
   11227 }
   11228 
   11229 static int
   11230 wm_serdes_mediachange(struct ifnet *ifp)
   11231 {
   11232 	struct wm_softc *sc = ifp->if_softc;
   11233 	bool pcs_autoneg = true; /* XXX */
   11234 	uint32_t ctrl_ext, pcs_lctl, reg;
   11235 
   11236 	/* XXX Currently, this function is not called on 8257[12] */
   11237 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11238 	    || (sc->sc_type >= WM_T_82575))
   11239 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11240 
   11241 	wm_serdes_power_up_link_82575(sc);
   11242 
   11243 	sc->sc_ctrl |= CTRL_SLU;
   11244 
   11245 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11246 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11247 
   11248 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11249 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11250 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11251 	case CTRL_EXT_LINK_MODE_SGMII:
   11252 		pcs_autoneg = true;
   11253 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11254 		break;
   11255 	case CTRL_EXT_LINK_MODE_1000KX:
   11256 		pcs_autoneg = false;
   11257 		/* FALLTHROUGH */
   11258 	default:
   11259 		if ((sc->sc_type == WM_T_82575)
   11260 		    || (sc->sc_type == WM_T_82576)) {
   11261 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11262 				pcs_autoneg = false;
   11263 		}
   11264 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11265 		    | CTRL_FRCFDX;
   11266 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11267 	}
   11268 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11269 
   11270 	if (pcs_autoneg) {
   11271 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11272 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11273 
   11274 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11275 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11276 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11277 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11278 	} else
   11279 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11280 
   11281 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11282 
   11284 	return 0;
   11285 }
   11286 
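         /*
          * wm_serdes_mediastatus:
          *
          *	Report the current media status (speed, duplex and
          *	negotiated flow control) from the PCS link status register.
          */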
   11287 static void
   11288 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11289 {
   11290 	struct wm_softc *sc = ifp->if_softc;
   11291 	struct mii_data *mii = &sc->sc_mii;
   11292 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11293 	uint32_t pcs_adv, pcs_lpab, reg;
   11294 
   11295 	ifmr->ifm_status = IFM_AVALID;
   11296 	ifmr->ifm_active = IFM_ETHER;
   11297 
   11298 	/* Check PCS */
   11299 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11300 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11301 		ifmr->ifm_active |= IFM_NONE;
   11302 		sc->sc_tbi_linkup = 0;
   11303 		goto setled;
   11304 	}
   11305 
   11306 	sc->sc_tbi_linkup = 1;
   11307 	ifmr->ifm_status |= IFM_ACTIVE;
   11308 	if (sc->sc_type == WM_T_I354) {
   11309 		uint32_t status;
   11310 
   11311 		status = CSR_READ(sc, WMREG_STATUS);
   11312 		if (((status & STATUS_2P5_SKU) != 0)
   11313 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11314 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11315 		} else
   11316 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11317 	} else {
   11318 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11319 		case PCS_LSTS_SPEED_10:
   11320 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11321 			break;
   11322 		case PCS_LSTS_SPEED_100:
   11323 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11324 			break;
   11325 		case PCS_LSTS_SPEED_1000:
   11326 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11327 			break;
   11328 		default:
   11329 			device_printf(sc->sc_dev, "Unknown speed\n");
   11330 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11331 			break;
   11332 		}
   11333 	}
   11334 	if ((reg & PCS_LSTS_FDX) != 0)
   11335 		ifmr->ifm_active |= IFM_FDX;
   11336 	else
   11337 		ifmr->ifm_active |= IFM_HDX;
   11338 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11339 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11340 		/* Check flow */
   11341 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11342 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11343 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11344 			goto setled;
   11345 		}
   11346 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11347 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11348 		DPRINTF(WM_DEBUG_LINK,
   11349 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11350 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11351 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11352 			mii->mii_media_active |= IFM_FLOW
   11353 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11354 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11355 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11356 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11357 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11358 			mii->mii_media_active |= IFM_FLOW
   11359 			    | IFM_ETH_TXPAUSE;
   11360 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11361 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11362 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11363 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11364 			mii->mii_media_active |= IFM_FLOW
   11365 			    | IFM_ETH_RXPAUSE;
   11366 		}
   11367 	}
   11368 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11369 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11370 setled:
   11371 	wm_tbi_serdes_set_linkled(sc);
   11372 }
   11373 
   11374 /*
   11375  * wm_serdes_tick:
   11376  *
   11377  *	Check the link on serdes devices.
   11378  */
   11379 static void
   11380 wm_serdes_tick(struct wm_softc *sc)
   11381 {
   11382 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11383 	struct mii_data *mii = &sc->sc_mii;
   11384 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11385 	uint32_t reg;
   11386 
   11387 	KASSERT(WM_CORE_LOCKED(sc));
   11388 
   11389 	mii->mii_media_status = IFM_AVALID;
   11390 	mii->mii_media_active = IFM_ETHER;
   11391 
   11392 	/* Check PCS */
   11393 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11394 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11395 		mii->mii_media_status |= IFM_ACTIVE;
   11396 		sc->sc_tbi_linkup = 1;
   11397 		sc->sc_tbi_serdes_ticks = 0;
   11398 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11399 		if ((reg & PCS_LSTS_FDX) != 0)
   11400 			mii->mii_media_active |= IFM_FDX;
   11401 		else
   11402 			mii->mii_media_active |= IFM_HDX;
   11403 	} else {
   11404 		mii->mii_media_status |= IFM_NONE;
   11405 		sc->sc_tbi_linkup = 0;
   11406 		/* If the timer expired, retry autonegotiation */
   11407 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11408 		    && (++sc->sc_tbi_serdes_ticks
   11409 			>= sc->sc_tbi_serdes_anegticks)) {
   11410 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11411 			sc->sc_tbi_serdes_ticks = 0;
   11412 			/* XXX */
   11413 			wm_serdes_mediachange(ifp);
   11414 		}
   11415 	}
   11416 
   11417 	wm_tbi_serdes_set_linkled(sc);
   11418 }
   11419 
   11420 /* SFP related */
   11421 
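         /*
          * wm_sfp_read_data_byte:
          *
          *	Read one byte from the SFP module's EEPROM through the
          *	I2CCMD register.  Returns 0 on success, -1 on timeout or
          *	I2C error.
          */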
   11422 static int
   11423 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11424 {
   11425 	uint32_t i2ccmd;
   11426 	int i;
   11427 
   11428 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11429 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11430 
   11431 	/* Poll the ready bit */
   11432 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11433 		delay(50);
   11434 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11435 		if (i2ccmd & I2CCMD_READY)
   11436 			break;
   11437 	}
   11438 	if ((i2ccmd & I2CCMD_READY) == 0)
   11439 		return -1;
   11440 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11441 		return -1;
   11442 
   11443 	*data = i2ccmd & 0x00ff;
   11444 
   11445 	return 0;
   11446 }
   11447 
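         /*
          * wm_sfp_get_media_type:
          *
          *	Read the SFP module's identifier and Ethernet compliance
          *	codes over I2C and derive the media type (SERDES or
          *	SGMII copper).
          */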
   11448 static uint32_t
   11449 wm_sfp_get_media_type(struct wm_softc *sc)
   11450 {
   11451 	uint32_t ctrl_ext;
   11452 	uint8_t val = 0;
   11453 	int timeout = 3;
   11454 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11455 	int rv = -1;
   11456 
   11457 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11458 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11459 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11460 	CSR_WRITE_FLUSH(sc);
   11461 
   11462 	/* Read SFP module data */
   11463 	while (timeout) {
   11464 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11465 		if (rv == 0)
   11466 			break;
   11467 		delay(100*1000); /* XXX too big */
   11468 		timeout--;
   11469 	}
   11470 	if (rv != 0)
   11471 		goto out;
   11472 	switch (val) {
   11473 	case SFF_SFP_ID_SFF:
   11474 		aprint_normal_dev(sc->sc_dev,
   11475 		    "Module/Connector soldered to board\n");
   11476 		break;
   11477 	case SFF_SFP_ID_SFP:
   11478 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11479 		break;
   11480 	case SFF_SFP_ID_UNKNOWN:
   11481 		goto out;
   11482 	default:
   11483 		break;
   11484 	}
   11485 
   11486 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11487 	if (rv != 0) {
   11488 		goto out;
   11489 	}
   11490 
   11491 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11492 		mediatype = WM_MEDIATYPE_SERDES;
   11493 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11494 		sc->sc_flags |= WM_F_SGMII;
   11495 		mediatype = WM_MEDIATYPE_COPPER;
   11496 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11497 		sc->sc_flags |= WM_F_SGMII;
   11498 		mediatype = WM_MEDIATYPE_SERDES;
   11499 	}
   11500 
   11501 out:
   11502 	/* Restore I2C interface setting */
   11503 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11504 
   11505 	return mediatype;
   11506 }
   11507 
   11508 /*
   11509  * NVM related.
   11510  * Microwire, SPI (w/wo EERD) and Flash.
   11511  */
   11512 
   11513 /* Both SPI and Microwire */
   11514 
   11515 /*
   11516  * wm_eeprom_sendbits:
   11517  *
   11518  *	Send a series of bits to the EEPROM.
   11519  */
   11520 static void
   11521 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11522 {
   11523 	uint32_t reg;
   11524 	int x;
   11525 
   11526 	reg = CSR_READ(sc, WMREG_EECD);
   11527 
   11528 	for (x = nbits; x > 0; x--) {
   11529 		if (bits & (1U << (x - 1)))
   11530 			reg |= EECD_DI;
   11531 		else
   11532 			reg &= ~EECD_DI;
   11533 		CSR_WRITE(sc, WMREG_EECD, reg);
   11534 		CSR_WRITE_FLUSH(sc);
   11535 		delay(2);
   11536 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11537 		CSR_WRITE_FLUSH(sc);
   11538 		delay(2);
   11539 		CSR_WRITE(sc, WMREG_EECD, reg);
   11540 		CSR_WRITE_FLUSH(sc);
   11541 		delay(2);
   11542 	}
   11543 }
   11544 
   11545 /*
   11546  * wm_eeprom_recvbits:
   11547  *
   11548  *	Receive a series of bits from the EEPROM.
   11549  */
   11550 static void
   11551 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11552 {
   11553 	uint32_t reg, val;
   11554 	int x;
   11555 
   11556 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11557 
   11558 	val = 0;
   11559 	for (x = nbits; x > 0; x--) {
   11560 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11561 		CSR_WRITE_FLUSH(sc);
   11562 		delay(2);
   11563 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11564 			val |= (1U << (x - 1));
   11565 		CSR_WRITE(sc, WMREG_EECD, reg);
   11566 		CSR_WRITE_FLUSH(sc);
   11567 		delay(2);
   11568 	}
   11569 	*valp = val;
   11570 }
   11571 
   11572 /* Microwire */
   11573 
   11574 /*
   11575  * wm_nvm_read_uwire:
   11576  *
   11577  *	Read a word from the EEPROM using the MicroWire protocol.
   11578  */
   11579 static int
   11580 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11581 {
   11582 	uint32_t reg, val;
   11583 	int i;
   11584 
   11585 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11586 		device_xname(sc->sc_dev), __func__));
   11587 
   11588 	if (sc->nvm.acquire(sc) != 0)
   11589 		return -1;
   11590 
   11591 	for (i = 0; i < wordcnt; i++) {
   11592 		/* Clear SK and DI. */
   11593 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11594 		CSR_WRITE(sc, WMREG_EECD, reg);
   11595 
   11596 		/*
   11597 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11598 		 * and Xen.
   11599 		 *
   11600 		 * We use this workaround only for 82540 because qemu's
   11601 		 * e1000 acts as an 82540.
   11602 		 */
   11603 		if (sc->sc_type == WM_T_82540) {
   11604 			reg |= EECD_SK;
   11605 			CSR_WRITE(sc, WMREG_EECD, reg);
   11606 			reg &= ~EECD_SK;
   11607 			CSR_WRITE(sc, WMREG_EECD, reg);
   11608 			CSR_WRITE_FLUSH(sc);
   11609 			delay(2);
   11610 		}
   11611 		/* XXX: end of workaround */
   11612 
   11613 		/* Set CHIP SELECT. */
   11614 		reg |= EECD_CS;
   11615 		CSR_WRITE(sc, WMREG_EECD, reg);
   11616 		CSR_WRITE_FLUSH(sc);
   11617 		delay(2);
   11618 
   11619 		/* Shift in the READ command. */
   11620 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11621 
   11622 		/* Shift in address. */
   11623 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11624 
   11625 		/* Shift out the data. */
   11626 		wm_eeprom_recvbits(sc, &val, 16);
   11627 		data[i] = val & 0xffff;
   11628 
   11629 		/* Clear CHIP SELECT. */
   11630 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11631 		CSR_WRITE(sc, WMREG_EECD, reg);
   11632 		CSR_WRITE_FLUSH(sc);
   11633 		delay(2);
   11634 	}
   11635 
   11636 	sc->nvm.release(sc);
   11637 	return 0;
   11638 }
   11639 
   11640 /* SPI */
   11641 
   11642 /*
   11643  * Set SPI and FLASH related information from the EECD register.
   11644  * For 82541 and 82547, the word size is taken from EEPROM.
   11645  */
   11646 static int
   11647 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11648 {
   11649 	int size;
   11650 	uint32_t reg;
   11651 	uint16_t data;
   11652 
   11653 	reg = CSR_READ(sc, WMREG_EECD);
   11654 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11655 
   11656 	/* Read the size of NVM from EECD by default */
   11657 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11658 	switch (sc->sc_type) {
   11659 	case WM_T_82541:
   11660 	case WM_T_82541_2:
   11661 	case WM_T_82547:
   11662 	case WM_T_82547_2:
   11663 		/* Set dummy value to access EEPROM */
   11664 		sc->sc_nvm_wordsize = 64;
   11665 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11666 			aprint_error_dev(sc->sc_dev,
   11667 			    "%s: failed to read EEPROM size\n", __func__);
   11668 		}
   11669 		reg = data;
   11670 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11671 		if (size == 0)
   11672 			size = 6; /* 64 word size */
   11673 		else
   11674 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11675 		break;
   11676 	case WM_T_80003:
   11677 	case WM_T_82571:
   11678 	case WM_T_82572:
   11679 	case WM_T_82573: /* SPI case */
   11680 	case WM_T_82574: /* SPI case */
   11681 	case WM_T_82583: /* SPI case */
   11682 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11683 		if (size > 14)
   11684 			size = 14;
   11685 		break;
   11686 	case WM_T_82575:
   11687 	case WM_T_82576:
   11688 	case WM_T_82580:
   11689 	case WM_T_I350:
   11690 	case WM_T_I354:
   11691 	case WM_T_I210:
   11692 	case WM_T_I211:
   11693 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11694 		if (size > 15)
   11695 			size = 15;
   11696 		break;
   11697 	default:
   11698 		aprint_error_dev(sc->sc_dev,
   11699 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11700 		return -1;
   11701 		break;
   11702 	}
   11703 
   11704 	sc->sc_nvm_wordsize = 1 << size;
   11705 
   11706 	return 0;
   11707 }
   11708 
   11709 /*
   11710  * wm_nvm_ready_spi:
   11711  *
   11712  *	Wait for a SPI EEPROM to be ready for commands.
   11713  */
   11714 static int
   11715 wm_nvm_ready_spi(struct wm_softc *sc)
   11716 {
   11717 	uint32_t val;
   11718 	int usec;
   11719 
   11720 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11721 		device_xname(sc->sc_dev), __func__));
   11722 
   11723 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11724 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11725 		wm_eeprom_recvbits(sc, &val, 8);
   11726 		if ((val & SPI_SR_RDY) == 0)
   11727 			break;
   11728 	}
   11729 	if (usec >= SPI_MAX_RETRIES) {
   11730 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11731 		return -1;
   11732 	}
   11733 	return 0;
   11734 }
   11735 
   11736 /*
   11737  * wm_nvm_read_spi:
   11738  *
   11739  *	Read a word from the EEPROM using the SPI protocol.
   11740  */
   11741 static int
   11742 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11743 {
   11744 	uint32_t reg, val;
   11745 	int i;
   11746 	uint8_t opc;
   11747 	int rv = 0;
   11748 
   11749 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11750 		device_xname(sc->sc_dev), __func__));
   11751 
   11752 	if (sc->nvm.acquire(sc) != 0)
   11753 		return -1;
   11754 
   11755 	/* Clear SK and CS. */
   11756 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11757 	CSR_WRITE(sc, WMREG_EECD, reg);
   11758 	CSR_WRITE_FLUSH(sc);
   11759 	delay(2);
   11760 
   11761 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11762 		goto out;
   11763 
   11764 	/* Toggle CS to flush commands. */
   11765 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11766 	CSR_WRITE_FLUSH(sc);
   11767 	delay(2);
   11768 	CSR_WRITE(sc, WMREG_EECD, reg);
   11769 	CSR_WRITE_FLUSH(sc);
   11770 	delay(2);
   11771 
   11772 	opc = SPI_OPC_READ;
   11773 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11774 		opc |= SPI_OPC_A8;
   11775 
   11776 	wm_eeprom_sendbits(sc, opc, 8);
   11777 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11778 
   11779 	for (i = 0; i < wordcnt; i++) {
   11780 		wm_eeprom_recvbits(sc, &val, 16);
   11781 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11782 	}
   11783 
   11784 	/* Raise CS and clear SK. */
   11785 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11786 	CSR_WRITE(sc, WMREG_EECD, reg);
   11787 	CSR_WRITE_FLUSH(sc);
   11788 	delay(2);
   11789 
   11790 out:
   11791 	sc->nvm.release(sc);
   11792 	return rv;
   11793 }
   11794 
   11795 /* NVM access using the EERD register */
   11796 
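         /*
          * wm_poll_eerd_eewr_done:
          *
          *	Poll the EERD (or EEWR) register given by "rw" until the
          *	DONE bit is set.  Returns 0 on completion, -1 on timeout.
          */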
   11797 static int
   11798 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11799 {
   11800 	uint32_t attempts = 100000;
   11801 	uint32_t i, reg = 0;
   11802 	int32_t done = -1;
   11803 
   11804 	for (i = 0; i < attempts; i++) {
   11805 		reg = CSR_READ(sc, rw);
   11806 
   11807 		if (reg & EERD_DONE) {
   11808 			done = 0;
   11809 			break;
   11810 		}
   11811 		delay(5);
   11812 	}
   11813 
   11814 	return done;
   11815 }
   11816 
   11817 static int
   11818 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11819     uint16_t *data)
   11820 {
   11821 	int i, eerd = 0;
   11822 	int rv = 0;
   11823 
   11824 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11825 		device_xname(sc->sc_dev), __func__));
   11826 
   11827 	if (sc->nvm.acquire(sc) != 0)
   11828 		return -1;
   11829 
   11830 	for (i = 0; i < wordcnt; i++) {
   11831 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11832 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11833 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11834 		if (rv != 0) {
   11835 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   11836 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   11837 			break;
   11838 		}
   11839 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11840 	}
   11841 
   11842 	sc->nvm.release(sc);
   11843 	return rv;
   11844 }
   11845 
   11846 /* Flash */
   11847 
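         /*
          * wm_nvm_valid_bank_detect_ich8lan:
          *
          *	Determine which flash bank holds the valid NVM image:
          *	from CTRL_EXT on PCH_SPT, from EECD on ICH8/ICH9 when the
          *	bank valid bits are usable, otherwise by checking each
          *	bank's signature byte.
          */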
   11848 static int
   11849 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11850 {
   11851 	uint32_t eecd;
   11852 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11853 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11854 	uint8_t sig_byte = 0;
   11855 
   11856 	switch (sc->sc_type) {
   11857 	case WM_T_PCH_SPT:
   11858 		/*
   11859 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11860 		 * sector valid bits from the NVM.
   11861 		 */
   11862 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11863 		if ((*bank == 0) || (*bank == 1)) {
   11864 			aprint_error_dev(sc->sc_dev,
   11865 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11866 				*bank);
   11867 			return -1;
   11868 		} else {
   11869 			*bank = *bank - 2;
   11870 			return 0;
   11871 		}
   11872 	case WM_T_ICH8:
   11873 	case WM_T_ICH9:
   11874 		eecd = CSR_READ(sc, WMREG_EECD);
   11875 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11876 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11877 			return 0;
   11878 		}
   11879 		/* FALLTHROUGH */
   11880 	default:
   11881 		/* Default to 0 */
   11882 		*bank = 0;
   11883 
   11884 		/* Check bank 0 */
   11885 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11886 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11887 			*bank = 0;
   11888 			return 0;
   11889 		}
   11890 
   11891 		/* Check bank 1 */
   11892 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11893 		    &sig_byte);
   11894 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11895 			*bank = 1;
   11896 			return 0;
   11897 		}
   11898 	}
   11899 
   11900 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11901 		device_xname(sc->sc_dev)));
   11902 	return -1;
   11903 }
   11904 
   11905 /******************************************************************************
   11906  * This function does initial flash setup so that a new read/write/erase cycle
   11907  * can be started.
   11908  *
   11909  * sc - The pointer to the hw structure
   11910  ****************************************************************************/
   11911 static int32_t
   11912 wm_ich8_cycle_init(struct wm_softc *sc)
   11913 {
   11914 	uint16_t hsfsts;
   11915 	int32_t error = 1;
   11916 	int32_t i     = 0;
   11917 
   11918 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11919 
   11920 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   11921 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11922 		return error;
   11923 	}
   11924 
   11925 	/* Clear FCERR in Hw status by writing 1 */
   11926 	/* Clear DAEL in Hw status by writing a 1 */
   11927 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11928 
   11929 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11930 
   11931 	/*
   11932 	 * Ideally we would have either a hardware "SPI cycle in
   11933 	 * progress" bit to check before starting a new cycle, or the
   11934 	 * FDONE bit would be set to 1 by a hardware reset so that it
   11935 	 * could indicate whether a cycle is in progress or has been
   11936 	 * completed.  We would also want a software semaphore to guard
   11937 	 * FDONE (or the in-progress bit) so that accesses by two
   11938 	 * threads are serialized and two threads cannot start a cycle
   11939 	 * at the same time.
   11940 	 */
   11941 
   11942 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11943 		/*
   11944 		 * There is no cycle running at present, so we can start a
   11945 		 * cycle
   11946 		 */
   11947 
   11948 		/* Begin by setting Flash Cycle Done. */
   11949 		hsfsts |= HSFSTS_DONE;
   11950 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11951 		error = 0;
   11952 	} else {
   11953 		/*
   11954 		 * otherwise poll for sometime so the current cycle has a
   11955 		 * chance to end before giving up.
   11956 		 */
   11957 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11958 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11959 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11960 				error = 0;
   11961 				break;
   11962 			}
   11963 			delay(1);
   11964 		}
   11965 		if (error == 0) {
   11966 			/*
   11967 			 * Successful in waiting for previous cycle to timeout,
   11968 			 * now set the Flash Cycle Done.
   11969 			 */
   11970 			hsfsts |= HSFSTS_DONE;
   11971 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11972 		}
   11973 	}
   11974 	return error;
   11975 }
   11976 
   11977 /******************************************************************************
   11978  * This function starts a flash cycle and waits for its completion
   11979  *
   11980  * sc - The pointer to the hw structure
   11981  ****************************************************************************/
   11982 static int32_t
   11983 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11984 {
   11985 	uint16_t hsflctl;
   11986 	uint16_t hsfsts;
   11987 	int32_t error = 1;
   11988 	uint32_t i = 0;
   11989 
   11990 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11991 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11992 	hsflctl |= HSFCTL_GO;
   11993 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11994 
   11995 	/* Wait till FDONE bit is set to 1 */
   11996 	do {
   11997 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11998 		if (hsfsts & HSFSTS_DONE)
   11999 			break;
   12000 		delay(1);
   12001 		i++;
   12002 	} while (i < timeout);
   12003 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12004 		error = 0;
   12005 
   12006 	return error;
   12007 }
   12008 
   12009 /******************************************************************************
   12010  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12011  *
   12012  * sc - The pointer to the hw structure
   12013  * index - The index of the byte or word to read.
   12014  * size - Size of data to read: 1=byte, 2=word, 4=dword
   12015  * data - Pointer to the word to store the value read.
   12016  *****************************************************************************/
   12017 static int32_t
   12018 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12019     uint32_t size, uint32_t *data)
   12020 {
   12021 	uint16_t hsfsts;
   12022 	uint16_t hsflctl;
   12023 	uint32_t flash_linear_address;
   12024 	uint32_t flash_data = 0;
   12025 	int32_t error = 1;
   12026 	int32_t count = 0;
   12027 
   12028 	if (size < 1 || size > 4 || data == NULL ||
   12029 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12030 		return error;
   12031 
   12032 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12033 	    sc->sc_ich8_flash_base;
   12034 
   12035 	do {
   12036 		delay(1);
   12037 		/* Steps */
   12038 		error = wm_ich8_cycle_init(sc);
   12039 		if (error)
   12040 			break;
   12041 
   12042 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12043 		/* The byte count field holds the transfer size minus one */
   12044 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12045 		    & HSFCTL_BCOUNT_MASK;
   12046 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12047 		if (sc->sc_type == WM_T_PCH_SPT) {
   12048 			/*
   12049 			 * In SPT, this register is in LAN memory space, not
   12050 			 * flash. Therefore, only 32 bit access is supported.
   12051 			 */
   12052 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   12053 			    (uint32_t)hsflctl);
   12054 		} else
   12055 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12056 
   12057 		/*
   12058 		 * Write the last 24 bits of index into Flash Linear address
   12059 		 * field in Flash Address
   12060 		 */
   12061 		/* TODO: TBD maybe check the index against the size of flash */
   12062 
   12063 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12064 
   12065 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12066 
   12067 		/*
   12068 		 * If FCERR is set, clear it and retry the whole sequence
   12069 		 * (up to ICH_FLASH_CYCLE_REPEAT_COUNT times); otherwise
   12070 		 * read the data out of Flash Data0, least significant
   12071 		 * byte first.
   12072 		 */
   12073 		if (error == 0) {
   12074 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12075 			if (size == 1)
   12076 				*data = (uint8_t)(flash_data & 0x000000FF);
   12077 			else if (size == 2)
   12078 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12079 			else if (size == 4)
   12080 				*data = (uint32_t)flash_data;
   12081 			break;
   12082 		} else {
   12083 			/*
   12084 			 * If we've gotten here, then things are probably
   12085 			 * completely hosed, but if the error condition is
   12086 			 * detected, it won't hurt to give it another try...
   12087 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12088 			 */
   12089 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12090 			if (hsfsts & HSFSTS_ERR) {
   12091 				/* Repeat for some time before giving up. */
   12092 				continue;
   12093 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12094 				break;
   12095 		}
   12096 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12097 
   12098 	return error;
   12099 }
   12100 
   12101 /******************************************************************************
   12102  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12103  *
   12104  * sc - pointer to wm_hw structure
   12105  * index - The index of the byte to read.
   12106  * data - Pointer to a byte to store the value read.
   12107  *****************************************************************************/
   12108 static int32_t
   12109 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12110 {
   12111 	int32_t status;
   12112 	uint32_t word = 0;
   12113 
   12114 	status = wm_read_ich8_data(sc, index, 1, &word);
   12115 	if (status == 0)
   12116 		*data = (uint8_t)word;
   12117 	else
   12118 		*data = 0;
   12119 
   12120 	return status;
   12121 }
   12122 
   12123 /******************************************************************************
   12124  * Reads a word from the NVM using the ICH8 flash access registers.
   12125  *
   12126  * sc - pointer to wm_hw structure
   12127  * index - The starting byte index of the word to read.
   12128  * data - Pointer to a word to store the value read.
   12129  *****************************************************************************/
   12130 static int32_t
   12131 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12132 {
   12133 	int32_t status;
   12134 	uint32_t word = 0;
   12135 
   12136 	status = wm_read_ich8_data(sc, index, 2, &word);
   12137 	if (status == 0)
   12138 		*data = (uint16_t)word;
   12139 	else
   12140 		*data = 0;
   12141 
   12142 	return status;
   12143 }
   12144 
   12145 /******************************************************************************
   12146  * Reads a dword from the NVM using the ICH8 flash access registers.
   12147  *
   12148  * sc - pointer to wm_hw structure
   12149  * index - The starting byte index of the word to read.
   12150  * data - Pointer to a word to store the value read.
   12151  *****************************************************************************/
   12152 static int32_t
   12153 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12154 {
   12155 	int32_t status;
   12156 
   12157 	status = wm_read_ich8_data(sc, index, 4, data);
   12158 	return status;
   12159 }
   12160 
   12161 /******************************************************************************
   12162  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12163  * register.
   12164  *
   12165  * sc - Struct containing variables accessed by shared code
   12166  * offset - offset of word in the EEPROM to read
   12167  * data - word read from the EEPROM
   12168  * words - number of words to read
   12169  *****************************************************************************/
   12170 static int
   12171 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12172 {
   12173 	int32_t  rv = 0;
   12174 	uint32_t flash_bank = 0;
   12175 	uint32_t act_offset = 0;
   12176 	uint32_t bank_offset = 0;
   12177 	uint16_t word = 0;
   12178 	uint16_t i = 0;
   12179 
   12180 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12181 		device_xname(sc->sc_dev), __func__));
   12182 
   12183 	if (sc->nvm.acquire(sc) != 0)
   12184 		return -1;
   12185 
   12186 	/*
   12187 	 * We need to know which is the valid flash bank.  In the event
   12188 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12189 	 * managing flash_bank.  So it cannot be trusted and needs
   12190 	 * to be updated with each read.
   12191 	 */
   12192 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12193 	if (rv) {
   12194 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12195 			device_xname(sc->sc_dev)));
   12196 		flash_bank = 0;
   12197 	}
   12198 
   12199 	/*
   12200 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12201 	 * size
   12202 	 */
   12203 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12204 
   12205 	for (i = 0; i < words; i++) {
   12206 		/* The NVM part needs a byte offset, hence * 2 */
   12207 		act_offset = bank_offset + ((offset + i) * 2);
   12208 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12209 		if (rv) {
   12210 			aprint_error_dev(sc->sc_dev,
   12211 			    "%s: failed to read NVM\n", __func__);
   12212 			break;
   12213 		}
   12214 		data[i] = word;
   12215 	}
   12216 
   12217 	sc->nvm.release(sc);
   12218 	return rv;
   12219 }
   12220 
   12221 /******************************************************************************
   12222  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12223  * register.
   12224  *
   12225  * sc - Struct containing variables accessed by shared code
   12226  * offset - offset of word in the EEPROM to read
   12227  * data - word read from the EEPROM
   12228  * words - number of words to read
   12229  *****************************************************************************/
   12230 static int
   12231 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12232 {
   12233 	int32_t  rv = 0;
   12234 	uint32_t flash_bank = 0;
   12235 	uint32_t act_offset = 0;
   12236 	uint32_t bank_offset = 0;
   12237 	uint32_t dword = 0;
   12238 	uint16_t i = 0;
   12239 
   12240 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12241 		device_xname(sc->sc_dev), __func__));
   12242 
   12243 	if (sc->nvm.acquire(sc) != 0)
   12244 		return -1;
   12245 
   12246 	/*
   12247 	 * We need to know which is the valid flash bank.  In the event
   12248 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12249 	 * managing flash_bank.  So it cannot be trusted and needs
   12250 	 * to be updated with each read.
   12251 	 */
   12252 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12253 	if (rv) {
   12254 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12255 			device_xname(sc->sc_dev)));
   12256 		flash_bank = 0;
   12257 	}
   12258 
   12259 	/*
   12260 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12261 	 * size
   12262 	 */
   12263 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12264 
   12265 	for (i = 0; i < words; i++) {
   12266 		/* The NVM part needs a byte offset, hence * 2 */
   12267 		act_offset = bank_offset + ((offset + i) * 2);
   12268 		/* but we must read dword aligned, so mask ... */
   12269 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12270 		if (rv) {
   12271 			aprint_error_dev(sc->sc_dev,
   12272 			    "%s: failed to read NVM\n", __func__);
   12273 			break;
   12274 		}
   12275 		/* ... and pick out low or high word */
   12276 		if ((act_offset & 0x2) == 0)
   12277 			data[i] = (uint16_t)(dword & 0xFFFF);
   12278 		else
   12279 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12280 	}
   12281 
   12282 	sc->nvm.release(sc);
   12283 	return rv;
   12284 }
   12285 
   12286 /* iNVM */
   12287 
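         /*
          * wm_nvm_read_word_invm:
          *
          *	Scan the iNVM (the integrated, one-time-programmable NVM
          *	of I210/I211) for a word autoload record matching
          *	"address".  Returns 0 if found, -1 otherwise.
          */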
   12288 static int
   12289 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12290 {
   12291 	int32_t rv = -1;	/* -1 means "not found" */
   12292 	uint32_t invm_dword;
   12293 	uint16_t i;
   12294 	uint8_t record_type, word_address;
   12295 
   12296 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12297 		device_xname(sc->sc_dev), __func__));
   12298 
   12299 	for (i = 0; i < INVM_SIZE; i++) {
   12300 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12301 		/* Get record type */
   12302 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12303 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12304 			break;
   12305 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12306 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12307 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12308 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12309 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12310 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12311 			if (word_address == address) {
   12312 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12313 				rv = 0;
   12314 				break;
   12315 			}
   12316 		}
   12317 	}
   12318 
   12319 	return rv;
   12320 }
   12321 
   12322 static int
   12323 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12324 {
   12325 	int rv = 0;
   12326 	int i;
   12327 
   12328 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12329 		device_xname(sc->sc_dev), __func__));
   12330 
   12331 	if (sc->nvm.acquire(sc) != 0)
   12332 		return -1;
   12333 
   12334 	for (i = 0; i < words; i++) {
   12335 		switch (offset + i) {
   12336 		case NVM_OFF_MACADDR:
   12337 		case NVM_OFF_MACADDR1:
   12338 		case NVM_OFF_MACADDR2:
   12339 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12340 			if (rv != 0) {
   12341 				data[i] = 0xffff;
   12342 				rv = -1;
   12343 			}
   12344 			break;
   12345 		case NVM_OFF_CFG2:
   12346 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12347 			if (rv != 0) {
   12348 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12349 				rv = 0;
   12350 			}
   12351 			break;
   12352 		case NVM_OFF_CFG4:
   12353 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12354 			if (rv != 0) {
   12355 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12356 				rv = 0;
   12357 			}
   12358 			break;
   12359 		case NVM_OFF_LED_1_CFG:
   12360 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12361 			if (rv != 0) {
   12362 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12363 				rv = 0;
   12364 			}
   12365 			break;
   12366 		case NVM_OFF_LED_0_2_CFG:
   12367 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12368 			if (rv != 0) {
   12369 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12370 				rv = 0;
   12371 			}
   12372 			break;
   12373 		case NVM_OFF_ID_LED_SETTINGS:
   12374 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12375 			if (rv != 0) {
   12376 				*data = ID_LED_RESERVED_FFFF;
   12377 				rv = 0;
   12378 			}
   12379 			break;
   12380 		default:
   12381 			DPRINTF(WM_DEBUG_NVM,
   12382 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12383 			*data = NVM_RESERVED_WORD;
   12384 			break;
   12385 		}
   12386 	}
   12387 
   12388 	sc->nvm.release(sc);
   12389 	return rv;
   12390 }
   12391 
   12392 /* Lock, detecting NVM type, validate checksum, version and read */
   12393 
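         /*
          * wm_nvm_is_onboard_eeprom:
          *
          *	Return 0 if the 82573/82574/82583 NVM is an external Flash
          *	device, otherwise 1 (onboard EEPROM).
          */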
   12394 static int
   12395 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12396 {
   12397 	uint32_t eecd = 0;
   12398 
   12399 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12400 	    || sc->sc_type == WM_T_82583) {
   12401 		eecd = CSR_READ(sc, WMREG_EECD);
   12402 
   12403 		/* Isolate bits 15 & 16 */
   12404 		eecd = ((eecd >> 15) & 0x03);
   12405 
   12406 		/* If both bits are set, device is Flash type */
   12407 		if (eecd == 0x03)
   12408 			return 0;
   12409 	}
   12410 	return 1;
   12411 }
   12412 
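         /*
          * wm_nvm_get_flash_presence_i210:
          *
          *	Return 1 if the EEC register reports an attached Flash,
          *	otherwise 0 (flash-less devices load from iNVM).
          */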
   12413 static int
   12414 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12415 {
   12416 	uint32_t eec;
   12417 
   12418 	eec = CSR_READ(sc, WMREG_EEC);
   12419 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12420 		return 1;
   12421 
   12422 	return 0;
   12423 }
   12424 
   12425 /*
   12426  * wm_nvm_validate_checksum
   12427  *
   12428  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12429  */
   12430 static int
   12431 wm_nvm_validate_checksum(struct wm_softc *sc)
   12432 {
   12433 	uint16_t checksum;
   12434 	uint16_t eeprom_data;
   12435 #ifdef WM_DEBUG
   12436 	uint16_t csum_wordaddr, valid_checksum;
   12437 #endif
   12438 	int i;
   12439 
   12440 	checksum = 0;
   12441 
   12442 	/* Don't check for I211 */
   12443 	if (sc->sc_type == WM_T_I211)
   12444 		return 0;
   12445 
   12446 #ifdef WM_DEBUG
   12447 	if (sc->sc_type == WM_T_PCH_LPT) {
   12448 		csum_wordaddr = NVM_OFF_COMPAT;
   12449 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12450 	} else {
   12451 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12452 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12453 	}
   12454 
   12455 	/* Dump EEPROM image for debug */
   12456 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12457 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12458 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12459 		/* XXX PCH_SPT? */
   12460 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12461 		if ((eeprom_data & valid_checksum) == 0) {
   12462 			DPRINTF(WM_DEBUG_NVM,
   12463 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12464 				device_xname(sc->sc_dev), eeprom_data,
   12465 				    valid_checksum));
   12466 		}
   12467 	}
   12468 
   12469 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12470 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12471 		for (i = 0; i < NVM_SIZE; i++) {
   12472 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12473 				printf("XXXX ");
   12474 			else
   12475 				printf("%04hx ", eeprom_data);
   12476 			if (i % 8 == 7)
   12477 				printf("\n");
   12478 		}
   12479 	}
   12480 
   12481 #endif /* WM_DEBUG */
   12482 
   12483 	for (i = 0; i < NVM_SIZE; i++) {
   12484 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12485 			return 1;
   12486 		checksum += eeprom_data;
   12487 	}
   12488 
   12489 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12490 #ifdef WM_DEBUG
   12491 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12492 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12493 #endif
   12494 	}
   12495 
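         	/* A checksum mismatch is reported (under WM_DEBUG) but not fatal */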
   12496 	return 0;
   12497 }
   12498 
   12499 static void
   12500 wm_nvm_version_invm(struct wm_softc *sc)
   12501 {
   12502 	uint32_t dword;
   12503 
   12504 	/*
   12505 	 * Linux's code to decode the version is very strange, so we
   12506 	 * don't follow that algorithm and just use word 61 as the
   12507 	 * document describes.  Perhaps it's not perfect, though...
   12508 	 *
   12509 	 * Example:
   12510 	 *
   12511 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12512 	 */
   12513 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12514 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12515 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12516 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12517 }
   12518 
   12519 static void
   12520 wm_nvm_version(struct wm_softc *sc)
   12521 {
   12522 	uint16_t major, minor, build, patch;
   12523 	uint16_t uid0, uid1;
   12524 	uint16_t nvm_data;
   12525 	uint16_t off;
   12526 	bool check_version = false;
   12527 	bool check_optionrom = false;
   12528 	bool have_build = false;
   12529 	bool have_uid = true;
   12530 
   12531 	/*
   12532 	 * Version format:
   12533 	 *
   12534 	 * XYYZ
   12535 	 * X0YZ
   12536 	 * X0YY
   12537 	 *
   12538 	 * Example:
   12539 	 *
   12540 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12541 	 *	82571	0x50a6	5.10.6?
   12542 	 *	82572	0x506a	5.6.10?
   12543 	 *	82572EI	0x5069	5.6.9?
   12544 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12545 	 *		0x2013	2.1.3?
   12546 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12547 	 */
   12548 
   12549 	/*
   12550 	 * XXX
   12551 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
   12552 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12553 	 */
   12554 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12555 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12556 		have_uid = false;
   12557 
   12558 	switch (sc->sc_type) {
   12559 	case WM_T_82571:
   12560 	case WM_T_82572:
   12561 	case WM_T_82574:
   12562 	case WM_T_82583:
   12563 		check_version = true;
   12564 		check_optionrom = true;
   12565 		have_build = true;
   12566 		break;
   12567 	case WM_T_82575:
   12568 	case WM_T_82576:
   12569 	case WM_T_82580:
   12570 		if (have_uid && ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID))
   12571 			check_version = true;
   12572 		break;
   12573 	case WM_T_I211:
   12574 		wm_nvm_version_invm(sc);
   12575 		have_uid = false;
   12576 		goto printver;
   12577 	case WM_T_I210:
   12578 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12579 			wm_nvm_version_invm(sc);
   12580 			have_uid = false;
   12581 			goto printver;
   12582 		}
   12583 		/* FALLTHROUGH */
   12584 	case WM_T_I350:
   12585 	case WM_T_I354:
   12586 		check_version = true;
   12587 		check_optionrom = true;
   12588 		break;
   12589 	default:
   12590 		return;
   12591 	}
   12592 	if (check_version
   12593 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12594 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12595 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12596 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12597 			build = nvm_data & NVM_BUILD_MASK;
   12598 			have_build = true;
   12599 		} else
   12600 			minor = nvm_data & 0x00ff;
   12601 
   12602 		/* Decimal */
   12603 		minor = (minor / 16) * 10 + (minor % 16);
   12604 		sc->sc_nvm_ver_major = major;
   12605 		sc->sc_nvm_ver_minor = minor;
   12606 
   12607 printver:
   12608 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12609 		    sc->sc_nvm_ver_minor);
   12610 		if (have_build) {
   12611 			sc->sc_nvm_ver_build = build;
   12612 			aprint_verbose(".%d", build);
   12613 		}
   12614 	}
   12615 
   12616 	/* Assume the option ROM area is above NVM_SIZE */
   12617 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12618 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12619 		/* Option ROM Version */
   12620 		if ((off != 0x0000) && (off != 0xffff)) {
   12621 			int rv;
   12622 
   12623 			off += NVM_COMBO_VER_OFF;
   12624 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12625 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12626 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12627 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12628 				/* 16bits */
   12629 				major = uid0 >> 8;
   12630 				build = (uid0 << 8) | (uid1 >> 8);
   12631 				patch = uid1 & 0x00ff;
   12632 				aprint_verbose(", option ROM Version %d.%d.%d",
   12633 				    major, build, patch);
   12634 			}
   12635 		}
   12636 	}
   12637 
   12638 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12639 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12640 }
   12641 
   12642 /*
   12643  * wm_nvm_read:
   12644  *
   12645  *	Read data from the serial EEPROM.
   12646  */
   12647 static int
   12648 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12649 {
   12650 	int rv;
   12651 
   12652 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12653 		device_xname(sc->sc_dev), __func__));
   12654 
   12655 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12656 		return -1;
   12657 
   12658 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12659 
   12660 	return rv;
   12661 }
   12662 
   12663 /*
   12664  * Hardware semaphores.
   12665  * Very complex...
   12666  */
   12667 
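         /*
          * wm_get_null, wm_put_null:
          *
          *	No-op acquire/release hooks for devices which need no
          *	locking around NVM access.
          */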
   12668 static int
   12669 wm_get_null(struct wm_softc *sc)
   12670 {
   12671 
   12672 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12673 		device_xname(sc->sc_dev), __func__));
   12674 	return 0;
   12675 }
   12676 
   12677 static void
   12678 wm_put_null(struct wm_softc *sc)
   12679 {
   12680 
   12681 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12682 		device_xname(sc->sc_dev), __func__));
   12683 	return;
   12684 }
   12685 
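         /*
          * wm_get_eecd:
          *
          *	Request direct EEPROM access through the EECD register's
          *	request/grant handshake.
          */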
   12686 static int
   12687 wm_get_eecd(struct wm_softc *sc)
   12688 {
   12689 	uint32_t reg;
   12690 	int x;
   12691 
   12692 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12693 		device_xname(sc->sc_dev), __func__));
   12694 
   12695 	reg = CSR_READ(sc, WMREG_EECD);
   12696 
   12697 	/* Request EEPROM access. */
   12698 	reg |= EECD_EE_REQ;
   12699 	CSR_WRITE(sc, WMREG_EECD, reg);
   12700 
   12701 	/* ..and wait for it to be granted. */
   12702 	for (x = 0; x < 1000; x++) {
   12703 		reg = CSR_READ(sc, WMREG_EECD);
   12704 		if (reg & EECD_EE_GNT)
   12705 			break;
   12706 		delay(5);
   12707 	}
   12708 	if ((reg & EECD_EE_GNT) == 0) {
   12709 		aprint_error_dev(sc->sc_dev,
   12710 		    "could not acquire EEPROM GNT\n");
   12711 		reg &= ~EECD_EE_REQ;
   12712 		CSR_WRITE(sc, WMREG_EECD, reg);
   12713 		return -1;
   12714 	}
   12715 
   12716 	return 0;
   12717 }
   12718 
   12719 static void
   12720 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12721 {
   12722 
   12723 	*eecd |= EECD_SK;
   12724 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12725 	CSR_WRITE_FLUSH(sc);
   12726 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12727 		delay(1);
   12728 	else
   12729 		delay(50);
   12730 }
   12731 
   12732 static void
   12733 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12734 {
   12735 
   12736 	*eecd &= ~EECD_SK;
   12737 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12738 	CSR_WRITE_FLUSH(sc);
   12739 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12740 		delay(1);
   12741 	else
   12742 		delay(50);
   12743 }
   12744 
   12745 static void
   12746 wm_put_eecd(struct wm_softc *sc)
   12747 {
   12748 	uint32_t reg;
   12749 
   12750 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12751 		device_xname(sc->sc_dev), __func__));
   12752 
   12753 	/* Stop nvm */
   12754 	reg = CSR_READ(sc, WMREG_EECD);
   12755 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12756 		/* Pull CS high */
   12757 		reg |= EECD_CS;
   12758 		wm_nvm_eec_clock_lower(sc, &reg);
   12759 	} else {
   12760 		/* CS on Microwire is active-high */
   12761 		reg &= ~(EECD_CS | EECD_DI);
   12762 		CSR_WRITE(sc, WMREG_EECD, reg);
   12763 		wm_nvm_eec_clock_raise(sc, &reg);
   12764 		wm_nvm_eec_clock_lower(sc, &reg);
   12765 	}
   12766 
   12767 	reg = CSR_READ(sc, WMREG_EECD);
   12768 	reg &= ~EECD_EE_REQ;
   12769 	CSR_WRITE(sc, WMREG_EECD, reg);
   12770 
   12771 	return;
   12772 }
   12773 
   12774 /*
   12775  * Get hardware semaphore.
   12776  * Same as e1000_get_hw_semaphore_generic()
   12777  */
   12778 static int
   12779 wm_get_swsm_semaphore(struct wm_softc *sc)
   12780 {
   12781 	int32_t timeout;
   12782 	uint32_t swsm;
   12783 
   12784 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12785 		device_xname(sc->sc_dev), __func__));
   12786 	KASSERT(sc->sc_nvm_wordsize > 0);
   12787 
   12788 retry:
   12789 	/* Get the SW semaphore. */
   12790 	timeout = sc->sc_nvm_wordsize + 1;
   12791 	while (timeout) {
   12792 		swsm = CSR_READ(sc, WMREG_SWSM);
   12793 
   12794 		if ((swsm & SWSM_SMBI) == 0)
   12795 			break;
   12796 
   12797 		delay(50);
   12798 		timeout--;
   12799 	}
   12800 
   12801 	if (timeout == 0) {
   12802 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   12803 			/*
   12804 			 * In rare circumstances, the SW semaphore may already
   12805 			 * be held unintentionally. Clear the semaphore once
   12806 			 * before giving up.
   12807 			 */
   12808 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   12809 			wm_put_swsm_semaphore(sc);
   12810 			goto retry;
   12811 		}
   12812 		aprint_error_dev(sc->sc_dev,
   12813 		    "could not acquire SWSM SMBI\n");
   12814 		return 1;
   12815 	}
   12816 
   12817 	/* Get the FW semaphore. */
   12818 	timeout = sc->sc_nvm_wordsize + 1;
   12819 	while (timeout) {
   12820 		swsm = CSR_READ(sc, WMREG_SWSM);
   12821 		swsm |= SWSM_SWESMBI;
   12822 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12823 		/* If we managed to set the bit we got the semaphore. */
   12824 		swsm = CSR_READ(sc, WMREG_SWSM);
   12825 		if (swsm & SWSM_SWESMBI)
   12826 			break;
   12827 
   12828 		delay(50);
   12829 		timeout--;
   12830 	}
   12831 
   12832 	if (timeout == 0) {
   12833 		aprint_error_dev(sc->sc_dev,
   12834 		    "could not acquire SWSM SWESMBI\n");
   12835 		/* Release semaphores */
   12836 		wm_put_swsm_semaphore(sc);
   12837 		return 1;
   12838 	}
   12839 	return 0;
   12840 }
   12841 
   12842 /*
   12843  * Put hardware semaphore.
   12844  * Same as e1000_put_hw_semaphore_generic()
   12845  */
   12846 static void
   12847 wm_put_swsm_semaphore(struct wm_softc *sc)
   12848 {
   12849 	uint32_t swsm;
   12850 
   12851 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12852 		device_xname(sc->sc_dev), __func__));
   12853 
   12854 	swsm = CSR_READ(sc, WMREG_SWSM);
   12855 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12856 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12857 }
   12858 
   12859 /*
   12860  * Get SW/FW semaphore.
   12861  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   12862  */
   12863 static int
   12864 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12865 {
   12866 	uint32_t swfw_sync;
   12867 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12868 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12869 	int timeout;
   12870 
   12871 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12872 		device_xname(sc->sc_dev), __func__));
   12873 
   12874 	if (sc->sc_type == WM_T_80003)
   12875 		timeout = 50;
   12876 	else
   12877 		timeout = 200;
   12878 
   12879 	while (timeout-- > 0) {	/* Use the chip-specific timeout */
   12880 		if (wm_get_swsm_semaphore(sc)) {
   12881 			aprint_error_dev(sc->sc_dev,
   12882 			    "%s: failed to get semaphore\n",
   12883 			    __func__);
   12884 			return 1;
   12885 		}
   12886 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12887 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12888 			swfw_sync |= swmask;
   12889 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12890 			wm_put_swsm_semaphore(sc);
   12891 			return 0;
   12892 		}
   12893 		wm_put_swsm_semaphore(sc);
   12894 		delay(5000);
   12895 	}
   12896 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12897 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12898 	return 1;
   12899 }
   12900 
   12901 static void
   12902 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12903 {
   12904 	uint32_t swfw_sync;
   12905 
   12906 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12907 		device_xname(sc->sc_dev), __func__));
   12908 
   12909 	while (wm_get_swsm_semaphore(sc) != 0)
   12910 		continue;
   12911 
   12912 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12913 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12914 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12915 
   12916 	wm_put_swsm_semaphore(sc);
   12917 }
   12918 
   12919 static int
   12920 wm_get_nvm_80003(struct wm_softc *sc)
   12921 {
   12922 	int rv;
   12923 
   12924 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12925 		device_xname(sc->sc_dev), __func__));
   12926 
   12927 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   12928 		aprint_error_dev(sc->sc_dev,
   12929 		    "%s: failed to get semaphore(SWFW)\n",
   12930 		    __func__);
   12931 		return rv;
   12932 	}
   12933 
   12934 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12935 	    && (rv = wm_get_eecd(sc)) != 0) {
   12936 		aprint_error_dev(sc->sc_dev,
   12937 		    "%s: failed to get semaphore(EECD)\n",
   12938 		    __func__);
   12939 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12940 		return rv;
   12941 	}
   12942 
   12943 	return 0;
   12944 }
   12945 
   12946 static void
   12947 wm_put_nvm_80003(struct wm_softc *sc)
   12948 {
   12949 
   12950 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12951 		device_xname(sc->sc_dev), __func__));
   12952 
   12953 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12954 		wm_put_eecd(sc);
   12955 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12956 }
   12957 
   12958 static int
   12959 wm_get_nvm_82571(struct wm_softc *sc)
   12960 {
   12961 	int rv;
   12962 
   12963 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12964 		device_xname(sc->sc_dev), __func__));
   12965 
   12966 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   12967 		return rv;
   12968 
   12969 	switch (sc->sc_type) {
   12970 	case WM_T_82573:
   12971 		break;
   12972 	default:
   12973 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12974 			rv = wm_get_eecd(sc);
   12975 		break;
   12976 	}
   12977 
   12978 	if (rv != 0) {
   12979 		aprint_error_dev(sc->sc_dev,
   12980 		    "%s: failed to get semaphore\n",
   12981 		    __func__);
   12982 		wm_put_swsm_semaphore(sc);
   12983 	}
   12984 
   12985 	return rv;
   12986 }
   12987 
   12988 static void
   12989 wm_put_nvm_82571(struct wm_softc *sc)
   12990 {
   12991 
   12992 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12993 		device_xname(sc->sc_dev), __func__));
   12994 
   12995 	switch (sc->sc_type) {
   12996 	case WM_T_82573:
   12997 		break;
   12998 	default:
   12999 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13000 			wm_put_eecd(sc);
   13001 		break;
   13002 	}
   13003 
   13004 	wm_put_swsm_semaphore(sc);
   13005 }
   13006 
   13007 static int
   13008 wm_get_phy_82575(struct wm_softc *sc)
   13009 {
   13010 
   13011 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13012 		device_xname(sc->sc_dev), __func__));
   13013 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13014 }
   13015 
   13016 static void
   13017 wm_put_phy_82575(struct wm_softc *sc)
   13018 {
   13019 
   13020 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13021 		device_xname(sc->sc_dev), __func__));
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13023 }
   13024 
   13025 static int
   13026 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13027 {
   13028 	uint32_t ext_ctrl;
	int timeout;
   13030 
   13031 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13032 		device_xname(sc->sc_dev), __func__));
   13033 
   13034 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13035 	for (timeout = 0; timeout < 200; timeout++) {
   13036 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13037 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13038 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13039 
   13040 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13041 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13042 			return 0;
   13043 		delay(5000);
   13044 	}
   13045 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13046 	    device_xname(sc->sc_dev), ext_ctrl);
   13047 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13048 	return 1;
   13049 }
   13050 
   13051 static void
   13052 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13053 {
   13054 	uint32_t ext_ctrl;
   13055 
   13056 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13057 		device_xname(sc->sc_dev), __func__));
   13058 
   13059 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13060 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13061 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13062 
   13063 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13064 }
   13065 
   13066 static int
   13067 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13068 {
   13069 	uint32_t ext_ctrl;
   13070 	int timeout;
   13071 
   13072 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13073 		device_xname(sc->sc_dev), __func__));
   13074 	mutex_enter(sc->sc_ich_phymtx);
   13075 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13076 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13077 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13078 			break;
   13079 		delay(1000);
   13080 	}
   13081 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13082 		printf("%s: SW has already locked the resource\n",
   13083 		    device_xname(sc->sc_dev));
   13084 		goto out;
   13085 	}
   13086 
   13087 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13088 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13089 	for (timeout = 0; timeout < 1000; timeout++) {
   13090 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13091 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13092 			break;
   13093 		delay(1000);
   13094 	}
   13095 	if (timeout >= 1000) {
   13096 		printf("%s: failed to acquire semaphore\n",
   13097 		    device_xname(sc->sc_dev));
   13098 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13099 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13100 		goto out;
   13101 	}
   13102 	return 0;
   13103 
   13104 out:
   13105 	mutex_exit(sc->sc_ich_phymtx);
   13106 	return 1;
   13107 }
   13108 
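/*
 * wm_get_swflag_ich8lan() above is a two-phase handshake: first wait
 * for EXTCNFCTR_MDIO_SW_OWNERSHIP to clear (nobody owns the resource),
 * then set the bit and read it back to confirm the claim stuck,
 * apparently because the hardware can refuse the write while firmware
 * still holds the resource.
 */
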
   13109 static void
   13110 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13111 {
   13112 	uint32_t ext_ctrl;
   13113 
   13114 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13115 		device_xname(sc->sc_dev), __func__));
   13116 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13117 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13118 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13119 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13120 	} else {
   13121 		printf("%s: Semaphore unexpectedly released\n",
   13122 		    device_xname(sc->sc_dev));
   13123 	}
   13124 
   13125 	mutex_exit(sc->sc_ich_phymtx);
   13126 }
   13127 
   13128 static int
   13129 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13130 {
   13131 
   13132 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13133 		device_xname(sc->sc_dev), __func__));
   13134 	mutex_enter(sc->sc_ich_nvmmtx);
   13135 
   13136 	return 0;
   13137 }
   13138 
   13139 static void
   13140 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13141 {
   13142 
   13143 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13144 		device_xname(sc->sc_dev), __func__));
   13145 	mutex_exit(sc->sc_ich_nvmmtx);
   13146 }
   13147 
   13148 static int
   13149 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13150 {
   13151 	int i = 0;
   13152 	uint32_t reg;
   13153 
   13154 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13155 		device_xname(sc->sc_dev), __func__));
   13156 
   13157 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13158 	do {
   13159 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13160 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13161 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13162 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13163 			break;
   13164 		delay(2*1000);
   13165 		i++;
   13166 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13167 
   13168 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13169 		wm_put_hw_semaphore_82573(sc);
   13170 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13171 		    device_xname(sc->sc_dev));
   13172 		return -1;
   13173 	}
   13174 
   13175 	return 0;
   13176 }
   13177 
   13178 static void
   13179 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13180 {
   13181 	uint32_t reg;
   13182 
   13183 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13184 		device_xname(sc->sc_dev), __func__));
   13185 
   13186 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13187 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13188 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13189 }
   13190 
   13191 /*
   13192  * Management mode and power management related subroutines.
   13193  * BMC, AMT, suspend/resume and EEE.
   13194  */
   13195 
   13196 #ifdef WM_WOL
   13197 static int
   13198 wm_check_mng_mode(struct wm_softc *sc)
   13199 {
   13200 	int rv;
   13201 
   13202 	switch (sc->sc_type) {
   13203 	case WM_T_ICH8:
   13204 	case WM_T_ICH9:
   13205 	case WM_T_ICH10:
   13206 	case WM_T_PCH:
   13207 	case WM_T_PCH2:
   13208 	case WM_T_PCH_LPT:
   13209 	case WM_T_PCH_SPT:
   13210 		rv = wm_check_mng_mode_ich8lan(sc);
   13211 		break;
   13212 	case WM_T_82574:
   13213 	case WM_T_82583:
   13214 		rv = wm_check_mng_mode_82574(sc);
   13215 		break;
   13216 	case WM_T_82571:
   13217 	case WM_T_82572:
   13218 	case WM_T_82573:
   13219 	case WM_T_80003:
   13220 		rv = wm_check_mng_mode_generic(sc);
   13221 		break;
   13222 	default:
		/* nothing to do */
   13224 		rv = 0;
   13225 		break;
   13226 	}
   13227 
   13228 	return rv;
   13229 }
   13230 
   13231 static int
   13232 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13233 {
   13234 	uint32_t fwsm;
   13235 
   13236 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13237 
   13238 	if (((fwsm & FWSM_FW_VALID) != 0)
   13239 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13240 		return 1;
   13241 
   13242 	return 0;
   13243 }
   13244 
   13245 static int
   13246 wm_check_mng_mode_82574(struct wm_softc *sc)
   13247 {
   13248 	uint16_t data;
   13249 
   13250 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13251 
   13252 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13253 		return 1;
   13254 
   13255 	return 0;
   13256 }
   13257 
   13258 static int
   13259 wm_check_mng_mode_generic(struct wm_softc *sc)
   13260 {
   13261 	uint32_t fwsm;
   13262 
   13263 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13264 
   13265 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13266 		return 1;
   13267 
   13268 	return 0;
   13269 }
   13270 #endif /* WM_WOL */
   13271 
   13272 static int
   13273 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13274 {
   13275 	uint32_t manc, fwsm, factps;
   13276 
   13277 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13278 		return 0;
   13279 
   13280 	manc = CSR_READ(sc, WMREG_MANC);
   13281 
   13282 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13283 		device_xname(sc->sc_dev), manc));
   13284 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13285 		return 0;
   13286 
   13287 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13288 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13289 		factps = CSR_READ(sc, WMREG_FACTPS);
   13290 		if (((factps & FACTPS_MNGCG) == 0)
   13291 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13292 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13294 		uint16_t data;
   13295 
   13296 		factps = CSR_READ(sc, WMREG_FACTPS);
   13297 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13298 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13299 			device_xname(sc->sc_dev), factps, data));
   13300 		if (((factps & FACTPS_MNGCG) == 0)
   13301 		    && ((data & NVM_CFG2_MNGM_MASK)
   13302 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13303 			return 1;
   13304 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13305 	    && ((manc & MANC_ASF_EN) == 0))
   13306 		return 1;
   13307 
   13308 	return 0;
   13309 }
   13310 
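/*
 * Summary of wm_enable_mng_pass_thru() above: TCO receive must be
 * enabled in MANC; on parts with a valid ARC subsystem the firmware
 * must be in iAMT mode with manageability clock gating off; on the
 * 82574/82583 the NVM CFG2 word must select pass-through mode; on
 * everything else, SMBus-based management without ASF implies
 * pass-through.
 */
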
   13311 static bool
   13312 wm_phy_resetisblocked(struct wm_softc *sc)
   13313 {
   13314 	bool blocked = false;
   13315 	uint32_t reg;
   13316 	int i = 0;
   13317 
   13318 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13319 		device_xname(sc->sc_dev), __func__));
   13320 
   13321 	switch (sc->sc_type) {
   13322 	case WM_T_ICH8:
   13323 	case WM_T_ICH9:
   13324 	case WM_T_ICH10:
   13325 	case WM_T_PCH:
   13326 	case WM_T_PCH2:
   13327 	case WM_T_PCH_LPT:
   13328 	case WM_T_PCH_SPT:
   13329 		do {
   13330 			reg = CSR_READ(sc, WMREG_FWSM);
   13331 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13332 				blocked = true;
   13333 				delay(10*1000);
   13334 				continue;
   13335 			}
   13336 			blocked = false;
   13337 		} while (blocked && (i++ < 30));
		return blocked;
   13340 	case WM_T_82571:
   13341 	case WM_T_82572:
   13342 	case WM_T_82573:
   13343 	case WM_T_82574:
   13344 	case WM_T_82583:
   13345 	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   13352 	default:
   13353 		/* no problem */
   13354 		break;
   13355 	}
   13356 
   13357 	return false;
   13358 }
   13359 
   13360 static void
   13361 wm_get_hw_control(struct wm_softc *sc)
   13362 {
   13363 	uint32_t reg;
   13364 
   13365 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13366 		device_xname(sc->sc_dev), __func__));
   13367 
   13368 	if (sc->sc_type == WM_T_82573) {
   13369 		reg = CSR_READ(sc, WMREG_SWSM);
   13370 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13371 	} else if (sc->sc_type >= WM_T_82571) {
   13372 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13373 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13374 	}
   13375 }
   13376 
   13377 static void
   13378 wm_release_hw_control(struct wm_softc *sc)
   13379 {
   13380 	uint32_t reg;
   13381 
   13382 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13383 		device_xname(sc->sc_dev), __func__));
   13384 
   13385 	if (sc->sc_type == WM_T_82573) {
   13386 		reg = CSR_READ(sc, WMREG_SWSM);
   13387 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13388 	} else if (sc->sc_type >= WM_T_82571) {
   13389 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13390 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13391 	}
   13392 }
   13393 
   13394 static void
   13395 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13396 {
   13397 	uint32_t reg;
   13398 
   13399 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13400 		device_xname(sc->sc_dev), __func__));
   13401 
   13402 	if (sc->sc_type < WM_T_PCH2)
   13403 		return;
   13404 
   13405 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13406 
   13407 	if (gate)
   13408 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13409 	else
   13410 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13411 
   13412 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13413 }
   13414 
   13415 static void
   13416 wm_smbustopci(struct wm_softc *sc)
   13417 {
   13418 	uint32_t fwsm, reg;
   13419 	int rv = 0;
   13420 
   13421 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13422 		device_xname(sc->sc_dev), __func__));
   13423 
   13424 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13425 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13426 
   13427 	/* Disable ULP */
   13428 	wm_ulp_disable(sc);
   13429 
   13430 	/* Acquire PHY semaphore */
   13431 	sc->phy.acquire(sc);
   13432 
   13433 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13434 	switch (sc->sc_type) {
   13435 	case WM_T_PCH_LPT:
   13436 	case WM_T_PCH_SPT:
   13437 		if (wm_phy_is_accessible_pchlan(sc))
   13438 			break;
   13439 
   13440 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13441 		reg |= CTRL_EXT_FORCE_SMBUS;
   13442 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13443 #if 0
   13444 		/* XXX Isn't this required??? */
   13445 		CSR_WRITE_FLUSH(sc);
   13446 #endif
   13447 		delay(50 * 1000);
   13448 		/* FALLTHROUGH */
   13449 	case WM_T_PCH2:
   13450 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13451 			break;
   13452 		/* FALLTHROUGH */
   13453 	case WM_T_PCH:
   13454 		if (sc->sc_type == WM_T_PCH)
   13455 			if ((fwsm & FWSM_FW_VALID) != 0)
   13456 				break;
   13457 
   13458 		if (wm_phy_resetisblocked(sc) == true) {
   13459 			printf("XXX reset is blocked(3)\n");
   13460 			break;
   13461 		}
   13462 
   13463 		wm_toggle_lanphypc_pch_lpt(sc);
   13464 
   13465 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13466 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13467 				break;
   13468 
   13469 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13470 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13471 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13472 
   13473 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13474 				break;
   13475 			rv = -1;
   13476 		}
   13477 		break;
   13478 	default:
   13479 		break;
   13480 	}
   13481 
   13482 	/* Release semaphore */
   13483 	sc->phy.release(sc);
   13484 
   13485 	if (rv == 0) {
   13486 		if (wm_phy_resetisblocked(sc)) {
   13487 			printf("XXX reset is blocked(4)\n");
   13488 			goto out;
   13489 		}
   13490 		wm_reset_phy(sc);
   13491 		if (wm_phy_resetisblocked(sc))
			printf("XXX reset is blocked(5)\n");
   13493 	}
   13494 
   13495 out:
   13496 	/*
   13497 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13498 	 */
   13499 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13500 		delay(10*1000);
   13501 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13502 	}
   13503 }
   13504 
   13505 static void
   13506 wm_init_manageability(struct wm_softc *sc)
   13507 {
   13508 
   13509 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13510 		device_xname(sc->sc_dev), __func__));
   13511 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13512 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13513 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13514 
   13515 		/* Disable hardware interception of ARP */
   13516 		manc &= ~MANC_ARP_EN;
   13517 
   13518 		/* Enable receiving management packets to the host */
   13519 		if (sc->sc_type >= WM_T_82571) {
   13520 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13522 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13523 		}
   13524 
   13525 		CSR_WRITE(sc, WMREG_MANC, manc);
   13526 	}
   13527 }
   13528 
   13529 static void
   13530 wm_release_manageability(struct wm_softc *sc)
   13531 {
   13532 
   13533 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13534 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13535 
   13536 		manc |= MANC_ARP_EN;
   13537 		if (sc->sc_type >= WM_T_82571)
   13538 			manc &= ~MANC_EN_MNG2HOST;
   13539 
   13540 		CSR_WRITE(sc, WMREG_MANC, manc);
   13541 	}
   13542 }
   13543 
   13544 static void
   13545 wm_get_wakeup(struct wm_softc *sc)
   13546 {
   13547 
   13548 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13549 	switch (sc->sc_type) {
   13550 	case WM_T_82573:
   13551 	case WM_T_82583:
   13552 		sc->sc_flags |= WM_F_HAS_AMT;
   13553 		/* FALLTHROUGH */
   13554 	case WM_T_80003:
   13555 	case WM_T_82575:
   13556 	case WM_T_82576:
   13557 	case WM_T_82580:
   13558 	case WM_T_I350:
   13559 	case WM_T_I354:
   13560 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13561 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13562 		/* FALLTHROUGH */
   13563 	case WM_T_82541:
   13564 	case WM_T_82541_2:
   13565 	case WM_T_82547:
   13566 	case WM_T_82547_2:
   13567 	case WM_T_82571:
   13568 	case WM_T_82572:
   13569 	case WM_T_82574:
   13570 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13571 		break;
   13572 	case WM_T_ICH8:
   13573 	case WM_T_ICH9:
   13574 	case WM_T_ICH10:
   13575 	case WM_T_PCH:
   13576 	case WM_T_PCH2:
   13577 	case WM_T_PCH_LPT:
   13578 	case WM_T_PCH_SPT:
   13579 		sc->sc_flags |= WM_F_HAS_AMT;
   13580 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13581 		break;
   13582 	default:
   13583 		break;
   13584 	}
   13585 
   13586 	/* 1: HAS_MANAGE */
   13587 	if (wm_enable_mng_pass_thru(sc) != 0)
   13588 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13589 
   13590 	/*
	 * Note that the WOL flags are set after the EEPROM machinery has
	 * been reset.
   13593 	 */
   13594 }
   13595 
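/*
 * Flag summary for wm_get_wakeup() above (as derived from the switch):
 * WM_F_HAS_AMT marks parts whose firmware can implement Intel AMT,
 * WM_F_ARC_SUBSYS_VALID marks parts whose FWSM mode field is
 * meaningful, and WM_F_ASF_FIRMWARE_PRES marks parts that may carry
 * ASF/iAMT firmware; WM_F_HAS_MANAGE is set once management
 * pass-through is known to be enabled.
 */
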
   13596 /*
   13597  * Unconfigure Ultra Low Power mode.
   13598  * Only for I217 and newer (see below).
   13599  */
   13600 static void
   13601 wm_ulp_disable(struct wm_softc *sc)
   13602 {
   13603 	uint32_t reg;
   13604 	int i = 0;
   13605 
   13606 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13607 		device_xname(sc->sc_dev), __func__));
   13608 	/* Exclude old devices */
   13609 	if ((sc->sc_type < WM_T_PCH_LPT)
   13610 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13611 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13612 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13613 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13614 		return;
   13615 
   13616 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13617 		/* Request ME un-configure ULP mode in the PHY */
   13618 		reg = CSR_READ(sc, WMREG_H2ME);
   13619 		reg &= ~H2ME_ULP;
   13620 		reg |= H2ME_ENFORCE_SETTINGS;
   13621 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13622 
   13623 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13624 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13625 			if (i++ == 30) {
				printf("%s: %s timed out\n",
				    device_xname(sc->sc_dev), __func__);
   13627 				return;
   13628 			}
   13629 			delay(10 * 1000);
   13630 		}
   13631 		reg = CSR_READ(sc, WMREG_H2ME);
   13632 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13633 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13634 
   13635 		return;
   13636 	}
   13637 
   13638 	/* Acquire semaphore */
   13639 	sc->phy.acquire(sc);
   13640 
   13641 	/* Toggle LANPHYPC */
   13642 	wm_toggle_lanphypc_pch_lpt(sc);
   13643 
   13644 	/* Unforce SMBus mode in PHY */
   13645 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13646 	if (reg == 0x0000 || reg == 0xffff) {
   13647 		uint32_t reg2;
   13648 
   13649 		printf("%s: Force SMBus first.\n", __func__);
   13650 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13651 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13652 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13653 		delay(50 * 1000);
   13654 
   13655 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13656 	}
   13657 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13658 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13659 
   13660 	/* Unforce SMBus mode in MAC */
   13661 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13662 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13663 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13664 
   13665 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13666 	reg |= HV_PM_CTRL_K1_ENA;
   13667 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13668 
   13669 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13670 	reg &= ~(I218_ULP_CONFIG1_IND
   13671 	    | I218_ULP_CONFIG1_STICKY_ULP
   13672 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13673 	    | I218_ULP_CONFIG1_WOL_HOST
   13674 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13675 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13676 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13677 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13678 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13679 	reg |= I218_ULP_CONFIG1_START;
   13680 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13681 
   13682 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13683 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13684 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13685 
   13686 	/* Release semaphore */
   13687 	sc->phy.release(sc);
   13688 	wm_gmii_reset(sc);
   13689 	delay(50 * 1000);
   13690 }
   13691 
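/*
 * wm_ulp_disable() above has two exit paths: when ME firmware is
 * present (FWSM_FW_VALID) the host merely requests the un-configure
 * through H2ME and polls FWSM_ULP_CFG_DONE; otherwise the host does
 * the work itself: toggle LANPHYPC, unforce SMBus mode in both the
 * PHY and the MAC, clear the I218 ULP configuration, and finally
 * reset the PHY.
 */
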
   13692 /* WOL in the newer chipset interfaces (pchlan) */
   13693 static void
   13694 wm_enable_phy_wakeup(struct wm_softc *sc)
   13695 {
   13696 #if 0
   13697 	uint16_t preg;
   13698 
   13699 	/* Copy MAC RARs to PHY RARs */
   13700 
   13701 	/* Copy MAC MTA to PHY MTA */
   13702 
   13703 	/* Configure PHY Rx Control register */
   13704 
   13705 	/* Enable PHY wakeup in MAC register */
   13706 
   13707 	/* Configure and enable PHY wakeup in PHY registers */
   13708 
   13709 	/* Activate PHY wakeup */
   13710 
   13711 	/* XXX */
   13712 #endif
   13713 }
   13714 
   13715 /* Power down workaround on D3 */
   13716 static void
   13717 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13718 {
   13719 	uint32_t reg;
   13720 	int i;
   13721 
   13722 	for (i = 0; i < 2; i++) {
   13723 		/* Disable link */
   13724 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13725 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13726 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13727 
   13728 		/*
   13729 		 * Call gig speed drop workaround on Gig disable before
   13730 		 * accessing any PHY registers
   13731 		 */
   13732 		if (sc->sc_type == WM_T_ICH8)
   13733 			wm_gig_downshift_workaround_ich8lan(sc);
   13734 
   13735 		/* Write VR power-down enable */
   13736 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13737 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13738 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13739 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13740 
   13741 		/* Read it back and test */
   13742 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13743 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13744 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13745 			break;
   13746 
   13747 		/* Issue PHY reset and repeat at most one more time */
   13748 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13749 	}
   13750 }
   13751 
   13752 static void
   13753 wm_enable_wakeup(struct wm_softc *sc)
   13754 {
   13755 	uint32_t reg, pmreg;
   13756 	pcireg_t pmode;
   13757 
   13758 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13759 		device_xname(sc->sc_dev), __func__));
   13760 
   13761 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13762 		&pmreg, NULL) == 0)
   13763 		return;
   13764 
   13765 	/* Advertise the wakeup capability */
   13766 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13767 	    | CTRL_SWDPIN(3));
   13768 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13769 
   13770 	/* ICH workaround */
   13771 	switch (sc->sc_type) {
   13772 	case WM_T_ICH8:
   13773 	case WM_T_ICH9:
   13774 	case WM_T_ICH10:
   13775 	case WM_T_PCH:
   13776 	case WM_T_PCH2:
   13777 	case WM_T_PCH_LPT:
   13778 	case WM_T_PCH_SPT:
   13779 		/* Disable gig during WOL */
   13780 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13781 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13782 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13783 		if (sc->sc_type == WM_T_PCH)
   13784 			wm_gmii_reset(sc);
   13785 
   13786 		/* Power down workaround */
   13787 		if (sc->sc_phytype == WMPHY_82577) {
   13788 			struct mii_softc *child;
   13789 
   13790 			/* Assume that the PHY is copper */
   13791 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13792 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13793 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13794 				    (768 << 5) | 25, 0x0444); /* magic num */
   13795 		}
   13796 		break;
   13797 	default:
   13798 		break;
   13799 	}
   13800 
   13801 	/* Keep the laser running on fiber adapters */
   13802 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13803 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13804 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13805 		reg |= CTRL_EXT_SWDPIN(3);
   13806 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13807 	}
   13808 
   13809 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13810 #if 0	/* for the multicast packet */
   13811 	reg |= WUFC_MC;
   13812 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13813 #endif
   13814 
   13815 	if (sc->sc_type >= WM_T_PCH)
   13816 		wm_enable_phy_wakeup(sc);
   13817 	else {
   13818 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13819 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13820 	}
   13821 
   13822 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13823 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13824 		|| (sc->sc_type == WM_T_PCH2))
   13825 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13826 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13827 
   13828 	/* Request PME */
   13829 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13830 #if 0
   13831 	/* Disable WOL */
   13832 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13833 #else
   13834 	/* For WOL */
   13835 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13836 #endif
   13837 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13838 }
   13839 
   13840 /* Disable ASPM L0s and/or L1 for workaround */
   13841 static void
   13842 wm_disable_aspm(struct wm_softc *sc)
   13843 {
   13844 	pcireg_t reg, mask = 0;
	const char *str = "";
   13846 
   13847 	/*
	 * Only for PCIe devices which have the PCIe capability in the PCI
	 * config space.
   13850 	 */
   13851 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   13852 		return;
   13853 
   13854 	switch (sc->sc_type) {
   13855 	case WM_T_82571:
   13856 	case WM_T_82572:
   13857 		/*
   13858 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   13859 		 * State Power management L1 State (ASPM L1).
   13860 		 */
   13861 		mask = PCIE_LCSR_ASPM_L1;
   13862 		str = "L1 is";
   13863 		break;
   13864 	case WM_T_82573:
   13865 	case WM_T_82574:
   13866 	case WM_T_82583:
   13867 		/*
   13868 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   13869 		 *
		 * The 82574 and 82583 do not support PCIe ASPM L0s with
		 * some chipsets.  The 82574 and 82583 documents say that
		 * disabling L0s on those specific chipsets is sufficient,
		 * but we follow what the Intel em driver does.
   13874 		 *
   13875 		 * References:
   13876 		 * Errata 8 of the Specification Update of i82573.
   13877 		 * Errata 20 of the Specification Update of i82574.
   13878 		 * Errata 9 of the Specification Update of i82583.
   13879 		 */
   13880 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   13881 		str = "L0s and L1 are";
   13882 		break;
   13883 	default:
   13884 		return;
   13885 	}
   13886 
   13887 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13888 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   13889 	reg &= ~mask;
   13890 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13891 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   13892 
	/* Print only in wm_attach() */
	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
		aprint_verbose_dev(sc->sc_dev,
		    "ASPM %s disabled to work around the errata.\n", str);
   13898 }
   13899 
   13900 /* LPLU */
   13901 
   13902 static void
   13903 wm_lplu_d0_disable(struct wm_softc *sc)
   13904 {
   13905 	struct mii_data *mii = &sc->sc_mii;
   13906 	uint32_t reg;
   13907 
   13908 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13909 		device_xname(sc->sc_dev), __func__));
   13910 
   13911 	if (sc->sc_phytype == WMPHY_IFE)
   13912 		return;
   13913 
   13914 	switch (sc->sc_type) {
   13915 	case WM_T_82571:
   13916 	case WM_T_82572:
   13917 	case WM_T_82573:
   13918 	case WM_T_82575:
   13919 	case WM_T_82576:
   13920 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13921 		reg &= ~PMR_D0_LPLU;
   13922 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13923 		break;
   13924 	case WM_T_82580:
   13925 	case WM_T_I350:
   13926 	case WM_T_I210:
   13927 	case WM_T_I211:
   13928 		reg = CSR_READ(sc, WMREG_PHPM);
   13929 		reg &= ~PHPM_D0A_LPLU;
   13930 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13931 		break;
   13932 	case WM_T_82574:
   13933 	case WM_T_82583:
   13934 	case WM_T_ICH8:
   13935 	case WM_T_ICH9:
   13936 	case WM_T_ICH10:
   13937 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13938 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13939 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13940 		CSR_WRITE_FLUSH(sc);
   13941 		break;
   13942 	case WM_T_PCH:
   13943 	case WM_T_PCH2:
   13944 	case WM_T_PCH_LPT:
   13945 	case WM_T_PCH_SPT:
   13946 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13947 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13948 		if (wm_phy_resetisblocked(sc) == false)
   13949 			reg |= HV_OEM_BITS_ANEGNOW;
   13950 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13951 		break;
   13952 	default:
   13953 		break;
   13954 	}
   13955 }
   13956 
   13957 /* EEE */
   13958 
   13959 static void
   13960 wm_set_eee_i350(struct wm_softc *sc)
   13961 {
   13962 	uint32_t ipcnfg, eeer;
   13963 
   13964 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13965 	eeer = CSR_READ(sc, WMREG_EEER);
   13966 
   13967 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13968 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13969 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13970 		    | EEER_LPI_FC);
   13971 	} else {
   13972 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13973 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13974 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13975 		    | EEER_LPI_FC);
   13976 	}
   13977 
   13978 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13979 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13980 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13981 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13982 }
   13983 
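/*
 * Illustrative call pattern (sketch, not compiled): wm_set_eee_i350()
 * simply mirrors the WM_F_EEE flag into IPCNFG/EEER, so re-running it
 * after flipping the flag enables or disables EEE advertisement.
 */
#if 0
	sc->sc_flags |= WM_F_EEE;	/* or &= ~WM_F_EEE to disable */
	wm_set_eee_i350(sc);
#endif
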
   13984 /*
   13985  * Workarounds (mainly PHY related).
   13986  * Basically, PHY's workarounds are in the PHY drivers.
   13987  */
   13988 
   13989 /* Work-around for 82566 Kumeran PCS lock loss */
   13990 static void
   13991 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13992 {
   13993 	struct mii_data *mii = &sc->sc_mii;
   13994 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13995 	int i;
   13996 	int reg;
   13997 
   13998 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13999 		device_xname(sc->sc_dev), __func__));
   14000 
   14001 	/* If the link is not up, do nothing */
   14002 	if ((status & STATUS_LU) == 0)
   14003 		return;
   14004 
   14005 	/* Nothing to do if the link is other than 1Gbps */
   14006 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14007 		return;
   14008 
	for (i = 0; i < 10; i++) {
		/*
		 * Read twice; the lock-loss bit appears to be latched, so
		 * the first read may return stale state.
		 */
   14012 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14013 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14014 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14015 			goto out;	/* GOOD! */
   14016 
   14017 		/* Reset the PHY */
   14018 		wm_reset_phy(sc);
   14019 		delay(5*1000);
   14020 	}
   14021 
   14022 	/* Disable GigE link negotiation */
   14023 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14024 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14025 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14026 
   14027 	/*
   14028 	 * Call gig speed drop workaround on Gig disable before accessing
   14029 	 * any PHY registers.
   14030 	 */
   14031 	wm_gig_downshift_workaround_ich8lan(sc);
   14032 
   14033 out:
   14034 	return;
   14035 }
   14036 
   14037 /* WOL from S5 stops working */
   14038 static void
   14039 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14040 {
   14041 	uint16_t kmreg;
   14042 
   14043 	/* Only for igp3 */
   14044 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14045 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14046 			return;
   14047 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14048 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14049 			return;
   14050 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14051 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14052 	}
   14053 }
   14054 
   14055 /*
   14056  * Workaround for pch's PHYs
   14057  * XXX should be moved to new PHY driver?
   14058  */
   14059 static void
   14060 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14061 {
   14062 
   14063 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14064 		device_xname(sc->sc_dev), __func__));
   14065 	KASSERT(sc->sc_type == WM_T_PCH);
   14066 
   14067 	if (sc->sc_phytype == WMPHY_82577)
   14068 		wm_set_mdio_slow_mode_hv(sc);
   14069 
   14070 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14071 
   14072 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14073 
   14074 	/* 82578 */
   14075 	if (sc->sc_phytype == WMPHY_82578) {
   14076 		struct mii_softc *child;
   14077 
   14078 		/*
   14079 		 * Return registers to default by doing a soft reset then
   14080 		 * writing 0x3140 to the control register
   14081 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14082 		 */
   14083 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14084 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14085 			PHY_RESET(child);
   14086 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14087 			    0x3140);
   14088 		}
   14089 	}
   14090 
   14091 	/* Select page 0 */
   14092 	sc->phy.acquire(sc);
   14093 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14094 	sc->phy.release(sc);
   14095 
   14096 	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is a link, so that K1 is disabled when the link runs at 1Gbps.
   14099 	 */
   14100 	wm_k1_gig_workaround_hv(sc, 1);
   14101 }
   14102 
   14103 static void
   14104 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14105 {
   14106 
   14107 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14108 		device_xname(sc->sc_dev), __func__));
   14109 	KASSERT(sc->sc_type == WM_T_PCH2);
   14110 
   14111 	wm_set_mdio_slow_mode_hv(sc);
   14112 }
   14113 
   14114 static int
   14115 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14116 {
   14117 	int k1_enable = sc->sc_nvm_k1_enabled;
   14118 
   14119 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14120 		device_xname(sc->sc_dev), __func__));
   14121 
   14122 	if (sc->phy.acquire(sc) != 0)
   14123 		return -1;
   14124 
   14125 	if (link) {
   14126 		k1_enable = 0;
   14127 
   14128 		/* Link stall fix for link up */
   14129 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   14130 	} else {
   14131 		/* Link stall fix for link down */
   14132 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   14133 	}
   14134 
   14135 	wm_configure_k1_ich8lan(sc, k1_enable);
   14136 	sc->phy.release(sc);
   14137 
   14138 	return 0;
   14139 }
   14140 
   14141 static void
   14142 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14143 {
   14144 	uint32_t reg;
   14145 
   14146 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14147 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14148 	    reg | HV_KMRN_MDIO_SLOW);
   14149 }
   14150 
   14151 static void
   14152 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14153 {
   14154 	uint32_t ctrl, ctrl_ext, tmp;
   14155 	uint16_t kmreg;
   14156 	int rv;
   14157 
   14158 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14159 	if (rv != 0)
   14160 		return;
   14161 
   14162 	if (k1_enable)
   14163 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14164 	else
   14165 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14166 
   14167 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14168 	if (rv != 0)
   14169 		return;
   14170 
   14171 	delay(20);
   14172 
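	/*
	 * Briefly force the MAC speed with the speed-select bypass bit
	 * set, then restore the original CTRL/CTRL_EXT values.  This
	 * appears to cycle the MAC/Kumeran interface so that the new K1
	 * setting takes effect; other e1000-family drivers perform the
	 * same dance.
	 */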
   14173 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14174 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14175 
   14176 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14177 	tmp |= CTRL_FRCSPD;
   14178 
   14179 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14180 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14181 	CSR_WRITE_FLUSH(sc);
   14182 	delay(20);
   14183 
   14184 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14185 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14186 	CSR_WRITE_FLUSH(sc);
   14187 	delay(20);
   14188 
   14189 	return;
   14190 }
   14191 
   14192 /* special case - for 82575 - need to do manual init ... */
   14193 static void
   14194 wm_reset_init_script_82575(struct wm_softc *sc)
   14195 {
   14196 	/*
	 * Remark: this is untested code - we have no board without EEPROM;
	 * same setup as mentioned in the FreeBSD driver for the i82575.
   14199 	 */
   14200 
   14201 	/* SerDes configuration via SERDESCTRL */
   14202 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14203 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14204 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14205 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14206 
   14207 	/* CCM configuration via CCMCTL register */
   14208 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14209 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14210 
   14211 	/* PCIe lanes configuration */
   14212 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14213 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14214 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14215 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14216 
   14217 	/* PCIe PLL Configuration */
   14218 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14219 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14220 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14221 }
   14222 
   14223 static void
   14224 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14225 {
   14226 	uint32_t reg;
   14227 	uint16_t nvmword;
   14228 	int rv;
   14229 
   14230 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14231 		return;
   14232 
   14233 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14234 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14235 	if (rv != 0) {
   14236 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14237 		    __func__);
   14238 		return;
   14239 	}
   14240 
   14241 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14242 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14243 		reg |= MDICNFG_DEST;
   14244 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14245 		reg |= MDICNFG_COM_MDIO;
   14246 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14247 }
   14248 
   14249 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14250 
   14251 static bool
   14252 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14253 {
   14254 	int i;
   14255 	uint32_t reg;
   14256 	uint16_t id1, id2;
   14257 
   14258 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14259 		device_xname(sc->sc_dev), __func__));
   14260 	id1 = id2 = 0xffff;
   14261 	for (i = 0; i < 2; i++) {
   14262 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14263 		if (MII_INVALIDID(id1))
   14264 			continue;
   14265 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14266 		if (MII_INVALIDID(id2))
   14267 			continue;
   14268 		break;
   14269 	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;
   14273 
   14274 	if (sc->sc_type < WM_T_PCH_LPT) {
   14275 		sc->phy.release(sc);
   14276 		wm_set_mdio_slow_mode_hv(sc);
   14277 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14278 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14279 		sc->phy.acquire(sc);
   14280 	}
   14281 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14282 		printf("XXX return with false\n");
   14283 		return false;
   14284 	}
   14285 out:
   14286 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   14287 		/* Only unforce SMBus if ME is not active */
   14288 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14289 			/* Unforce SMBus mode in PHY */
   14290 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14291 			    CV_SMB_CTRL);
   14292 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14293 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14294 			    CV_SMB_CTRL, reg);
   14295 
   14296 			/* Unforce SMBus mode in MAC */
   14297 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14298 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14299 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14300 		}
   14301 	}
   14302 	return true;
   14303 }
   14304 
   14305 static void
   14306 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14307 {
   14308 	uint32_t reg;
   14309 	int i;
   14310 
   14311 	/* Set PHY Config Counter to 50msec */
   14312 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14313 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14314 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14315 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14316 
   14317 	/* Toggle LANPHYPC */
   14318 	reg = CSR_READ(sc, WMREG_CTRL);
   14319 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14320 	reg &= ~CTRL_LANPHYPC_VALUE;
   14321 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14322 	CSR_WRITE_FLUSH(sc);
   14323 	delay(1000);
   14324 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14325 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14326 	CSR_WRITE_FLUSH(sc);
   14327 
   14328 	if (sc->sc_type < WM_T_PCH_LPT)
   14329 		delay(50 * 1000);
   14330 	else {
   14331 		i = 20;
   14332 
   14333 		do {
   14334 			delay(5 * 1000);
   14335 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14336 		    && i--);
   14337 
   14338 		delay(30 * 1000);
   14339 	}
   14340 }
   14341 
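/*
 * Note on the toggle above: asserting CTRL_LANPHYPC_OVERRIDE with the
 * value bit clear power-cycles the PHY.  Pre-LPT parts simply wait a
 * fixed 50ms; LPT and newer expose a completion indication
 * (CTRL_EXT_LPCD) that is polled before the final settle delay.
 */
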
   14342 static int
   14343 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14344 {
   14345 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14346 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14347 	uint32_t rxa;
   14348 	uint16_t scale = 0, lat_enc = 0;
   14349 	int32_t obff_hwm = 0;
   14350 	int64_t lat_ns, value;
   14351 
   14352 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14353 		device_xname(sc->sc_dev), __func__));
   14354 
   14355 	if (link) {
   14356 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14357 		uint32_t status;
   14358 		uint16_t speed;
   14359 		pcireg_t preg;
   14360 
   14361 		status = CSR_READ(sc, WMREG_STATUS);
   14362 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14363 		case STATUS_SPEED_10:
   14364 			speed = 10;
   14365 			break;
   14366 		case STATUS_SPEED_100:
   14367 			speed = 100;
   14368 			break;
   14369 		case STATUS_SPEED_1000:
   14370 			speed = 1000;
   14371 			break;
   14372 		default:
   14373 			device_printf(sc->sc_dev, "Unknown speed "
   14374 			    "(status = %08x)\n", status);
   14375 			return -1;
   14376 		}
   14377 
   14378 		/* Rx Packet Buffer Allocation size (KB) */
   14379 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14380 
   14381 		/*
   14382 		 * Determine the maximum latency tolerated by the device.
   14383 		 *
   14384 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14385 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14386 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14387 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14388 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14389 		 */
   14390 		lat_ns = ((int64_t)rxa * 1024 -
   14391 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14392 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14393 		if (lat_ns < 0)
   14394 			lat_ns = 0;
   14395 		else
   14396 			lat_ns /= speed;
   14397 		value = lat_ns;
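		/*
		 * Units check (informal): rxa is in KB, so rxa * 1024 is
		 * bytes; subtracting two maximum frames leaves the data
		 * that must drain; * 8 converts bytes to bits, and the
		 * * 1000 pre-scales so that dividing bits by the speed
		 * in Mb/s yields nanoseconds.
		 */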
   14398 
   14399 		while (value > LTRV_VALUE) {
			scale++;
   14401 			value = howmany(value, __BIT(5));
   14402 		}
   14403 		if (scale > LTRV_SCALE_MAX) {
   14404 			printf("%s: Invalid LTR latency scale %d\n",
   14405 			    device_xname(sc->sc_dev), scale);
   14406 			return -1;
   14407 		}
   14408 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
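		/*
		 * Worked example (numbers purely illustrative): for
		 * lat_ns = 100000 the loop above divides by 2^5 twice
		 * (100000 -> 3125 -> 98), giving scale = 2 and value =
		 * 98; scale 2 means units of 2^10 ns, so lat_enc encodes
		 * 98 * 1024 ns, roughly 100 us.
		 */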
   14409 
   14410 		/* Determine the maximum latency tolerated by the platform */
   14411 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14412 		    WM_PCI_LTR_CAP_LPT);
   14413 		max_snoop = preg & 0xffff;
   14414 		max_nosnoop = preg >> 16;
   14415 
   14416 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14417 
   14418 		if (lat_enc > max_ltr_enc) {
   14419 			lat_enc = max_ltr_enc;
   14420 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14421 			    * PCI_LTR_SCALETONS(
   14422 				    __SHIFTOUT(lat_enc,
   14423 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14424 		}
   14425 
   14426 		if (lat_ns) {
   14427 			lat_ns *= speed * 1000;
   14428 			lat_ns /= 8;
   14429 			lat_ns /= 1000000000;
   14430 			obff_hwm = (int32_t)(rxa - lat_ns);
   14431 		}
   14432 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
   14434 			    "(rxa = %d, lat_ns = %d)\n",
   14435 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14436 			return -1;
   14437 		}
   14438 	}
   14439 	/* Snoop and No-Snoop latencies the same */
   14440 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14441 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14442 
   14443 	/* Set OBFF high water mark */
   14444 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14445 	reg |= obff_hwm;
   14446 	CSR_WRITE(sc, WMREG_SVT, reg);
   14447 
   14448 	/* Enable OBFF */
   14449 	reg = CSR_READ(sc, WMREG_SVCR);
   14450 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14451 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14452 
   14453 	return 0;
   14454 }
   14455 
   14456 /*
 * I210 Errata 25 and I211 Errata 10: Slow System Clock.
 *
 * If the internal PHY's PLL comes up unconfigured, reset the PHY and
 * bounce the device through D3/D0 with a patched iNVM autoload word,
 * retrying up to WM_MAX_PLL_TRIES times.
   14459  */
   14460 static void
   14461 wm_pll_workaround_i210(struct wm_softc *sc)
   14462 {
   14463 	uint32_t mdicnfg, wuc;
   14464 	uint32_t reg;
   14465 	pcireg_t pcireg;
   14466 	uint32_t pmreg;
   14467 	uint16_t nvmword, tmp_nvmword;
   14468 	int phyval;
   14469 	bool wa_done = false;
   14470 	int i;
   14471 
   14472 	/* Save WUC and MDICNFG registers */
   14473 	wuc = CSR_READ(sc, WMREG_WUC);
   14474 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14475 
   14476 	reg = mdicnfg & ~MDICNFG_DEST;
   14477 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14478 
   14479 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14480 		nvmword = INVM_DEFAULT_AL;
   14481 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14482 
	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0) {
		/* Restore the MDICNFG setting before bailing out */
		CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
		return;
	}
   14487 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14488 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14489 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14490 
   14491 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14492 			break; /* OK */
   14493 		}
   14494 
   14495 		wa_done = true;
   14496 		/* Directly reset the internal PHY */
   14497 		reg = CSR_READ(sc, WMREG_CTRL);
   14498 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14499 
   14500 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14501 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14502 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14503 
   14504 		CSR_WRITE(sc, WMREG_WUC, 0);
   14505 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14506 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14507 
   14508 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14509 		    pmreg + PCI_PMCSR);
   14510 		pcireg |= PCI_PMCSR_STATE_D3;
   14511 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14512 		    pmreg + PCI_PMCSR, pcireg);
   14513 		delay(1000);
   14514 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14515 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14516 		    pmreg + PCI_PMCSR, pcireg);
   14517 
   14518 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14519 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14520 
   14521 		/* Restore WUC register */
   14522 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14523 	}
   14524 
   14525 	/* Restore MDICNFG setting */
   14526 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14527 	if (wa_done)
   14528 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14529 }
   14530 
   14531 static void
   14532 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14533 {
   14534 	uint32_t reg;
   14535 
   14536 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14537 		device_xname(sc->sc_dev), __func__));
   14538 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14539 
   14540 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14541 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14542 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14543 
   14544 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14545 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14546 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14547 }
   14548