      1 /*	$NetBSD: if_wm.c,v 1.524 2017/07/13 13:27:08 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.524 2017/07/13 13:27:08 msaitoh Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
     157 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
    161 
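/*
 * Usage sketch (illustrative, not compiled): the second DPRINTF argument
 * is a fully parenthesized printf() argument list, so a format string and
 * its arguments pass through the macro unchanged, e.g.:
 */
#if 0
	DPRINTF(WM_DEBUG_LINK,
	    ("%s: LINK: link is up\n", device_xname(sc->sc_dev)));
#endif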
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    165 #else
    166 #define CALLOUT_FLAGS	0
    167 #endif
    168 
    169 /*
     170  * The maximum number of interrupts this device driver supports.
    171  */
    172 #define WM_MAX_NQUEUEINTR	16
    173 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    174 
    175 #ifndef WM_DISABLE_MSI
    176 #define	WM_DISABLE_MSI 0
    177 #endif
    178 #ifndef WM_DISABLE_MSIX
    179 #define	WM_DISABLE_MSIX 0
    180 #endif
    181 
    182 int wm_disable_msi = WM_DISABLE_MSI;
    183 int wm_disable_msix = WM_DISABLE_MSIX;
    184 
    185 /*
    186  * Transmit descriptor list size.  Due to errata, we can only have
    187  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    188  * on >= 82544.  We tell the upper layers that they can queue a lot
    189  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    190  * of them at a time.
    191  *
    192  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    193  * chains containing many small mbufs have been observed in zero-copy
    194  * situations with jumbo frames.
    195  */
    196 #define	WM_NTXSEGS		256
    197 #define	WM_IFQUEUELEN		256
    198 #define	WM_TXQUEUELEN_MAX	64
    199 #define	WM_TXQUEUELEN_MAX_82547	16
    200 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    201 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    202 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    203 #define	WM_NTXDESC_82542	256
    204 #define	WM_NTXDESC_82544	4096
    205 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    206 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    207 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    208 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    209 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    210 
    211 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    212 
    213 #define	WM_TXINTERQSIZE		256
    214 
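/*
 * Minimal sketch (not compiled): because WM_NTXDESC(txq) is a power of
 * two, the "(x + 1) & (ndesc - 1)" in WM_NEXTTX() is a cheap equivalent
 * of "(x + 1) % ndesc", wrapping the ring index back to the head.
 */
#if 0
	int ndesc = WM_NTXDESC_82544;			/* 4096 */
	int last = ndesc - 1;				/* 4095 */

	KASSERT(((last + 1) & (ndesc - 1)) == 0);	/* wraps to 0 */
#endif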
    215 /*
    216  * Receive descriptor list size.  We have one Rx buffer for normal
    217  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    218  * packet.  We allocate 256 receive descriptors, each with a 2k
    219  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    220  */
    221 #define	WM_NRXDESC		256
    222 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    223 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    224 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    225 
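/*
 * Worked example for the comment above, assuming MCLBYTES == 2048 and a
 * 9018 byte jumbo frame: one frame spans howmany(9018, 2048) = 5 Rx
 * buffers, so the 256-entry ring fits 256 / 5 = 51 such frames, i.e.
 * "room for 50" with one frame of slack.
 */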
    226 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    227 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    228 #endif
    229 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    230 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    231 #endif
    232 
    233 typedef union txdescs {
    234 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    235 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    236 } txdescs_t;
    237 
    238 typedef union rxdescs {
    239 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    240 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    241 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    242 } rxdescs_t;
    243 
    244 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    245 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    246 
    247 /*
    248  * Software state for transmit jobs.
    249  */
    250 struct wm_txsoft {
    251 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    252 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    253 	int txs_firstdesc;		/* first descriptor in packet */
    254 	int txs_lastdesc;		/* last descriptor in packet */
    255 	int txs_ndesc;			/* # of descriptors used */
    256 };
    257 
    258 /*
    259  * Software state for receive buffers.  Each descriptor gets a
    260  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    261  * more than one buffer, we chain them together.
    262  */
    263 struct wm_rxsoft {
    264 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    265 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    266 };
    267 
    268 #define WM_LINKUP_TIMEOUT	50
    269 
    270 static uint16_t swfwphysem[] = {
    271 	SWFW_PHY0_SM,
    272 	SWFW_PHY1_SM,
    273 	SWFW_PHY2_SM,
    274 	SWFW_PHY3_SM
    275 };
    276 
    277 static const uint32_t wm_82580_rxpbs_table[] = {
    278 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    279 };
    280 
    281 struct wm_softc;
    282 
    283 #ifdef WM_EVENT_COUNTERS
    284 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    285 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    286 	struct evcnt qname##_ev_##evname;
    287 
    288 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
     289 	do {								\
    290 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    291 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    292 		    "%s%02d%s", #qname, (qnum), #evname);		\
    293 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    294 		    (evtype), NULL, (xname),				\
    295 		    (q)->qname##_##evname##_evcnt_name);		\
     296 	} while (0)
    297 
    298 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    299 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    300 
    301 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    302 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    303 
    304 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    305 	evcnt_detach(&(q)->qname##_ev_##evname);
    306 #endif /* WM_EVENT_COUNTERS */
    307 
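/*
 * Illustrative expansion (not compiled): WM_Q_EVCNT_DEFINE(txq, txdw)
 * pastes tokens into two members.  Note that sizeof() here measures the
 * literal string "qname##XX##evname" (18 bytes; ## is not expanded inside
 * a string literal), which is simply a buffer large enough for the names
 * the ATTACH macro builds -- the longest, "txq00txfifo_stall", fills it
 * exactly.
 */
#if 0
	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
	struct evcnt txq_ev_txdw;
#endif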
    308 struct wm_txqueue {
    309 	kmutex_t *txq_lock;		/* lock for tx operations */
    310 
    311 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    312 
    313 	/* Software state for the transmit descriptors. */
    314 	int txq_num;			/* must be a power of two */
    315 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    316 
    317 	/* TX control data structures. */
    318 	int txq_ndesc;			/* must be a power of two */
    319 	size_t txq_descsize;		/* a tx descriptor size */
    320 	txdescs_t *txq_descs_u;
     321 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    322 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     323 	int txq_desc_rseg;		/* real number of control segments */
    324 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    325 #define	txq_descs	txq_descs_u->sctxu_txdescs
    326 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    327 
    328 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    329 
    330 	int txq_free;			/* number of free Tx descriptors */
    331 	int txq_next;			/* next ready Tx descriptor */
    332 
    333 	int txq_sfree;			/* number of free Tx jobs */
    334 	int txq_snext;			/* next free Tx job */
    335 	int txq_sdirty;			/* dirty Tx jobs */
    336 
    337 	/* These 4 variables are used only on the 82547. */
    338 	int txq_fifo_size;		/* Tx FIFO size */
    339 	int txq_fifo_head;		/* current head of FIFO */
    340 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    341 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    342 
    343 	/*
    344 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     345 	 * CPUs. This queue mediates between them without blocking.
    346 	 */
    347 	pcq_t *txq_interq;
    348 
    349 	/*
     350 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     351 	 * to manage the Tx H/W queue's busy flag.
    352 	 */
    353 	int txq_flags;			/* flags for H/W queue, see below */
    354 #define	WM_TXQ_NO_SPACE	0x1
    355 
    356 	bool txq_stopping;
    357 
    358 	uint32_t txq_packets;		/* for AIM */
    359 	uint32_t txq_bytes;		/* for AIM */
    360 #ifdef WM_EVENT_COUNTERS
    361 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    362 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    363 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    364 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    365 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    366 						/* XXX not used? */
    367 
    368 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
     369 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    370 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    371 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    372 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    373 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    374 
    375 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    376 
    377 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    378 
    379 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    380 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    381 #endif /* WM_EVENT_COUNTERS */
    382 };
    383 
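/*
 * Minimal sketch (hypothetical helper, not part of the driver) of how
 * txq_interq decouples senders from the descriptor ring: pcq(9) allows
 * lockless enqueue from any CPU, and whichever context later holds
 * txq_lock drains the queue into the hardware ring.
 */
#if 0
static int
wm_example_interq(struct wm_txqueue *txq, struct mbuf *m)
{

	/* Producer side: runs on any CPU, txq_lock not required. */
	if (!pcq_put(txq->txq_interq, m)) {
		m_freem(m);
		return ENOBUFS;
	}

	/* Consumer side: runs under txq_lock. */
	mutex_enter(txq->txq_lock);
	while ((m = pcq_get(txq->txq_interq)) != NULL) {
		/* ... load the DMA map and fill Tx descriptors ... */
	}
	mutex_exit(txq->txq_lock);
	return 0;
}
#endif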
    384 struct wm_rxqueue {
    385 	kmutex_t *rxq_lock;		/* lock for rx operations */
    386 
    387 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    388 
    389 	/* Software state for the receive descriptors. */
    390 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    391 
    392 	/* RX control data structures. */
    393 	int rxq_ndesc;			/* must be a power of two */
    394 	size_t rxq_descsize;		/* a rx descriptor size */
    395 	rxdescs_t *rxq_descs_u;
    396 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    397 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     398 	int rxq_desc_rseg;		/* real number of control segments */
    399 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    400 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    401 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    402 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    403 
    404 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    405 
    406 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    407 	int rxq_discard;
    408 	int rxq_len;
    409 	struct mbuf *rxq_head;
    410 	struct mbuf *rxq_tail;
    411 	struct mbuf **rxq_tailp;
    412 
    413 	bool rxq_stopping;
    414 
    415 	uint32_t rxq_packets;		/* for AIM */
    416 	uint32_t rxq_bytes;		/* for AIM */
    417 #ifdef WM_EVENT_COUNTERS
    418 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    419 
    420 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    421 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    422 #endif
    423 };
    424 
    425 struct wm_queue {
    426 	int wmq_id;			/* index of transmit and receive queues */
    427 	int wmq_intr_idx;		/* index of MSI-X tables */
    428 
    429 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    430 	bool wmq_set_itr;
    431 
    432 	struct wm_txqueue wmq_txq;
    433 	struct wm_rxqueue wmq_rxq;
    434 
    435 	void *wmq_si;
    436 };
    437 
    438 struct wm_phyop {
    439 	int (*acquire)(struct wm_softc *);
    440 	void (*release)(struct wm_softc *);
    441 	int reset_delay_us;
    442 };
    443 
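/*
 * Usage sketch (illustrative): every PHY access is bracketed by the
 * acquire/release pair so the driver and MAC firmware do not race for
 * the MDIO bus.
 */
#if 0
	if (sc->phy.acquire(sc) == 0) {
		/* ... read or write PHY registers ... */
		sc->phy.release(sc);
	}
#endif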
    444 /*
    445  * Software state per device.
    446  */
    447 struct wm_softc {
    448 	device_t sc_dev;		/* generic device information */
    449 	bus_space_tag_t sc_st;		/* bus space tag */
    450 	bus_space_handle_t sc_sh;	/* bus space handle */
    451 	bus_size_t sc_ss;		/* bus space size */
    452 	bus_space_tag_t sc_iot;		/* I/O space tag */
    453 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    454 	bus_size_t sc_ios;		/* I/O space size */
    455 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    456 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    457 	bus_size_t sc_flashs;		/* flash registers space size */
    458 	off_t sc_flashreg_offset;	/*
    459 					 * offset to flash registers from
    460 					 * start of BAR
    461 					 */
    462 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    463 
    464 	struct ethercom sc_ethercom;	/* ethernet common data */
    465 	struct mii_data sc_mii;		/* MII/media information */
    466 
    467 	pci_chipset_tag_t sc_pc;
    468 	pcitag_t sc_pcitag;
    469 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    470 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    471 
    472 	uint16_t sc_pcidevid;		/* PCI device ID */
    473 	wm_chip_type sc_type;		/* MAC type */
    474 	int sc_rev;			/* MAC revision */
    475 	wm_phy_type sc_phytype;		/* PHY type */
    476 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    477 #define	WM_MEDIATYPE_UNKNOWN		0x00
    478 #define	WM_MEDIATYPE_FIBER		0x01
    479 #define	WM_MEDIATYPE_COPPER		0x02
    480 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    481 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    482 	int sc_flags;			/* flags; see below */
    483 	int sc_if_flags;		/* last if_flags */
    484 	int sc_flowflags;		/* 802.3x flow control flags */
    485 	int sc_align_tweak;
    486 
    487 	void *sc_ihs[WM_MAX_NINTR];	/*
    488 					 * interrupt cookie.
    489 					 * - legacy and msi use sc_ihs[0] only
    490 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    491 					 */
    492 	pci_intr_handle_t *sc_intrs;	/*
    493 					 * legacy and msi use sc_intrs[0] only
     494 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    495 					 */
    496 	int sc_nintrs;			/* number of interrupts */
    497 
    498 	int sc_link_intr_idx;		/* index of MSI-X tables */
    499 
    500 	callout_t sc_tick_ch;		/* tick callout */
    501 	bool sc_core_stopping;
    502 
    503 	int sc_nvm_ver_major;
    504 	int sc_nvm_ver_minor;
    505 	int sc_nvm_ver_build;
    506 	int sc_nvm_addrbits;		/* NVM address bits */
    507 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    508 	int sc_ich8_flash_base;
    509 	int sc_ich8_flash_bank_size;
    510 	int sc_nvm_k1_enabled;
    511 
    512 	int sc_nqueues;
    513 	struct wm_queue *sc_queue;
    514 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    515 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    516 
    517 	int sc_affinity_offset;
    518 
    519 #ifdef WM_EVENT_COUNTERS
    520 	/* Event counters. */
    521 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    522 
     523 	/* WM_T_82542_2_1 only */
    524 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    525 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    526 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    527 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    528 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    529 #endif /* WM_EVENT_COUNTERS */
    530 
     531 	/* This variable is used only on the 82547. */
    532 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    533 
    534 	uint32_t sc_ctrl;		/* prototype CTRL register */
    535 #if 0
    536 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    537 #endif
    538 	uint32_t sc_icr;		/* prototype interrupt bits */
    539 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    540 	uint32_t sc_tctl;		/* prototype TCTL register */
    541 	uint32_t sc_rctl;		/* prototype RCTL register */
    542 	uint32_t sc_txcw;		/* prototype TXCW register */
    543 	uint32_t sc_tipg;		/* prototype TIPG register */
    544 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    545 	uint32_t sc_pba;		/* prototype PBA register */
    546 
    547 	int sc_tbi_linkup;		/* TBI link status */
    548 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    549 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    550 
    551 	int sc_mchash_type;		/* multicast filter offset */
    552 
    553 	krndsource_t rnd_source;	/* random source */
    554 
    555 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    556 
    557 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    558 	kmutex_t *sc_ich_phymtx;	/*
    559 					 * 82574/82583/ICH/PCH specific PHY
    560 					 * mutex. For 82574/82583, the mutex
    561 					 * is used for both PHY and NVM.
    562 					 */
    563 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    564 
    565 	struct wm_phyop phy;
    566 };
    567 
    568 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    569 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    570 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    571 
    572 #define	WM_RXCHAIN_RESET(rxq)						\
    573 do {									\
    574 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    575 	*(rxq)->rxq_tailp = NULL;					\
    576 	(rxq)->rxq_len = 0;						\
    577 } while (/*CONSTCOND*/0)
    578 
    579 #define	WM_RXCHAIN_LINK(rxq, m)						\
    580 do {									\
    581 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    582 	(rxq)->rxq_tailp = &(m)->m_next;				\
    583 } while (/*CONSTCOND*/0)
    584 
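/*
 * Illustrative trace (not compiled): rxq_tailp always points at the
 * m_next slot of the last mbuf, so appending another Rx buffer to a
 * multi-buffer packet is O(1) with no list walk.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* head = NULL, tailp = &rxq_head */
	WM_RXCHAIN_LINK(rxq, m1);	/* head = m1, tailp = &m1->m_next */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next = m2, tailp = &m2->m_next */
#endif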
    585 #ifdef WM_EVENT_COUNTERS
    586 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    587 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    588 
    589 #define WM_Q_EVCNT_INCR(qname, evname)			\
    590 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    591 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    592 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    593 #else /* !WM_EVENT_COUNTERS */
    594 #define	WM_EVCNT_INCR(ev)	/* nothing */
    595 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    596 
    597 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    598 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    599 #endif /* !WM_EVENT_COUNTERS */
    600 
    601 #define	CSR_READ(sc, reg)						\
    602 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    603 #define	CSR_WRITE(sc, reg, val)						\
    604 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    605 #define	CSR_WRITE_FLUSH(sc)						\
    606 	(void) CSR_READ((sc), WMREG_STATUS)
    607 
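/*
 * Usage sketch (illustrative): PCI writes are posted, so a write that
 * must reach the chip before the CPU continues is followed by a read of
 * the STATUS register via CSR_WRITE_FLUSH(), e.g. when advancing the Rx
 * tail pointer ("next" is a hypothetical index):
 */
#if 0
	CSR_WRITE(sc, rxq->rxq_rdt_reg, next);
	CSR_WRITE_FLUSH(sc);		/* force the posted write out */
#endif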
    608 #define ICH8_FLASH_READ32(sc, reg)					\
    609 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    610 	    (reg) + sc->sc_flashreg_offset)
    611 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    612 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    613 	    (reg) + sc->sc_flashreg_offset, (data))
    614 
    615 #define ICH8_FLASH_READ16(sc, reg)					\
    616 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    617 	    (reg) + sc->sc_flashreg_offset)
    618 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    619 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    620 	    (reg) + sc->sc_flashreg_offset, (data))
    621 
    622 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    623 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    624 
    625 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    626 #define	WM_CDTXADDR_HI(txq, x)						\
    627 	(sizeof(bus_addr_t) == 8 ?					\
    628 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    629 
    630 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    631 #define	WM_CDRXADDR_HI(rxq, x)						\
    632 	(sizeof(bus_addr_t) == 8 ?					\
    633 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
    634 
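/*
 * Worked example (not compiled): descriptor x of a Tx ring whose control
 * data starts at bus address B lives at B + txq_descsize * x (WM_CDTXOFF),
 * and the _LO/_HI macros split that address into the two 32-bit halves
 * the hardware registers expect; the high half is 0 when bus_addr_t is
 * 32 bits wide.
 */
#if 0
	uint32_t lo = WM_CDTXADDR_LO(txq, 0);	/* low 32 bits of base */
	uint32_t hi = WM_CDTXADDR_HI(txq, 0);	/* high 32 bits, or 0 */
#endif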
    635 /*
    636  * Register read/write functions.
    637  * Other than CSR_{READ|WRITE}().
    638  */
    639 #if 0
    640 static inline uint32_t wm_io_read(struct wm_softc *, int);
    641 #endif
    642 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    643 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    644 	uint32_t, uint32_t);
    645 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    646 
    647 /*
    648  * Descriptor sync/init functions.
    649  */
    650 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    651 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    652 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    653 
    654 /*
    655  * Device driver interface functions and commonly used functions.
    656  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    657  */
    658 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    659 static int	wm_match(device_t, cfdata_t, void *);
    660 static void	wm_attach(device_t, device_t, void *);
    661 static int	wm_detach(device_t, int);
    662 static bool	wm_suspend(device_t, const pmf_qual_t *);
    663 static bool	wm_resume(device_t, const pmf_qual_t *);
    664 static void	wm_watchdog(struct ifnet *);
    665 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    666 static void	wm_tick(void *);
    667 static int	wm_ifflags_cb(struct ethercom *);
    668 static int	wm_ioctl(struct ifnet *, u_long, void *);
    669 /* MAC address related */
    670 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    671 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    672 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    673 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    674 static void	wm_set_filter(struct wm_softc *);
    675 /* Reset and init related */
    676 static void	wm_set_vlan(struct wm_softc *);
    677 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    678 static void	wm_get_auto_rd_done(struct wm_softc *);
    679 static void	wm_lan_init_done(struct wm_softc *);
    680 static void	wm_get_cfg_done(struct wm_softc *);
    681 static void	wm_phy_post_reset(struct wm_softc *);
    682 static void	wm_init_lcd_from_nvm(struct wm_softc *);
    683 static void	wm_initialize_hardware_bits(struct wm_softc *);
    684 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    685 static void	wm_reset_phy(struct wm_softc *);
    686 static void	wm_flush_desc_rings(struct wm_softc *);
    687 static void	wm_reset(struct wm_softc *);
    688 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    689 static void	wm_rxdrain(struct wm_rxqueue *);
    690 static void	wm_rss_getkey(uint8_t *);
    691 static void	wm_init_rss(struct wm_softc *);
    692 static void	wm_adjust_qnum(struct wm_softc *, int);
    693 static inline bool	wm_is_using_msix(struct wm_softc *);
    694 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    695 static int	wm_softint_establish(struct wm_softc *, int, int);
    696 static int	wm_setup_legacy(struct wm_softc *);
    697 static int	wm_setup_msix(struct wm_softc *);
    698 static int	wm_init(struct ifnet *);
    699 static int	wm_init_locked(struct ifnet *);
    700 static void	wm_turnon(struct wm_softc *);
    701 static void	wm_turnoff(struct wm_softc *);
    702 static void	wm_stop(struct ifnet *, int);
    703 static void	wm_stop_locked(struct ifnet *, int);
    704 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    705 static void	wm_82547_txfifo_stall(void *);
    706 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    707 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    708 /* DMA related */
    709 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    710 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    711 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    712 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    713     struct wm_txqueue *);
    714 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    715 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    716 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    717     struct wm_rxqueue *);
    718 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    719 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    720 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    721 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    722 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    723 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    724 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    725     struct wm_txqueue *);
    726 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    727     struct wm_rxqueue *);
    728 static int	wm_alloc_txrx_queues(struct wm_softc *);
    729 static void	wm_free_txrx_queues(struct wm_softc *);
    730 static int	wm_init_txrx_queues(struct wm_softc *);
    731 /* Start */
    732 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    733     struct wm_txsoft *, uint32_t *, uint8_t *);
    734 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    735 static void	wm_start(struct ifnet *);
    736 static void	wm_start_locked(struct ifnet *);
    737 static int	wm_transmit(struct ifnet *, struct mbuf *);
    738 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    739 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    740 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    741     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    742 static void	wm_nq_start(struct ifnet *);
    743 static void	wm_nq_start_locked(struct ifnet *);
    744 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    745 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    746 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    747 static void	wm_deferred_start_locked(struct wm_txqueue *);
    748 static void	wm_handle_queue(void *);
    749 /* Interrupt */
    750 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    751 static void	wm_rxeof(struct wm_rxqueue *, u_int);
    752 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    753 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    754 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    755 static void	wm_linkintr(struct wm_softc *, uint32_t);
    756 static int	wm_intr_legacy(void *);
    757 static inline void	wm_txrxintr_disable(struct wm_queue *);
    758 static inline void	wm_txrxintr_enable(struct wm_queue *);
    759 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    760 static int	wm_txrxintr_msix(void *);
    761 static int	wm_linkintr_msix(void *);
    762 
    763 /*
    764  * Media related.
    765  * GMII, SGMII, TBI, SERDES and SFP.
    766  */
    767 /* Common */
    768 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    769 /* GMII related */
    770 static void	wm_gmii_reset(struct wm_softc *);
    771 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    772 static int	wm_get_phy_id_82575(struct wm_softc *);
    773 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    774 static int	wm_gmii_mediachange(struct ifnet *);
    775 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    776 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    777 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    778 static int	wm_gmii_i82543_readreg(device_t, int, int);
    779 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    780 static int	wm_gmii_mdic_readreg(device_t, int, int);
    781 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    782 static int	wm_gmii_i82544_readreg(device_t, int, int);
    783 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    784 static int	wm_gmii_i80003_readreg(device_t, int, int);
    785 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    786 static int	wm_gmii_bm_readreg(device_t, int, int);
    787 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    788 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    789 static int	wm_gmii_hv_readreg(device_t, int, int);
    790 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    791 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    792 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    793 static int	wm_gmii_82580_readreg(device_t, int, int);
    794 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    795 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    796 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    797 static void	wm_gmii_statchg(struct ifnet *);
    798 /*
     799  * Kumeran related (80003, ICH* and PCH*).
     800  * These functions are not for accessing MII registers but for
     801  * accessing Kumeran-specific registers.
    802  */
    803 static int	wm_kmrn_readreg(struct wm_softc *, int);
    804 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    805 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    806 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    807 /* SGMII */
    808 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    809 static int	wm_sgmii_readreg(device_t, int, int);
    810 static void	wm_sgmii_writereg(device_t, int, int, int);
    811 /* TBI related */
    812 static void	wm_tbi_mediainit(struct wm_softc *);
    813 static int	wm_tbi_mediachange(struct ifnet *);
    814 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    815 static int	wm_check_for_link(struct wm_softc *);
    816 static void	wm_tbi_tick(struct wm_softc *);
    817 /* SERDES related */
    818 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    819 static int	wm_serdes_mediachange(struct ifnet *);
    820 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    821 static void	wm_serdes_tick(struct wm_softc *);
    822 /* SFP related */
    823 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    824 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    825 
    826 /*
    827  * NVM related.
    828  * Microwire, SPI (w/wo EERD) and Flash.
    829  */
    830 /* Misc functions */
    831 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    832 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    833 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    834 /* Microwire */
    835 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    836 /* SPI */
    837 static int	wm_nvm_ready_spi(struct wm_softc *);
    838 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     839 /* Reading with EERD */
    840 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    841 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    842 /* Flash */
    843 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    844     unsigned int *);
    845 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    846 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    847 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    848 	uint32_t *);
    849 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    850 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    851 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    852 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    853 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    854 /* iNVM */
    855 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    856 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    857 /* Lock, detecting NVM type, validate checksum and read */
    858 static int	wm_nvm_acquire(struct wm_softc *);
    859 static void	wm_nvm_release(struct wm_softc *);
    860 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    861 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    862 static int	wm_nvm_validate_checksum(struct wm_softc *);
    863 static void	wm_nvm_version_invm(struct wm_softc *);
    864 static void	wm_nvm_version(struct wm_softc *);
    865 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    866 
    867 /*
    868  * Hardware semaphores.
     869  * Very complex...
    870  */
    871 static int	wm_get_null(struct wm_softc *);
    872 static void	wm_put_null(struct wm_softc *);
    873 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    874 static void	wm_put_swsm_semaphore(struct wm_softc *);
    875 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    876 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    877 static int	wm_get_phy_82575(struct wm_softc *);
    878 static void	wm_put_phy_82575(struct wm_softc *);
    879 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    880 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    881 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    882 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    883 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    884 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    885 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    886 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    887 
    888 /*
    889  * Management mode and power management related subroutines.
    890  * BMC, AMT, suspend/resume and EEE.
    891  */
    892 #if 0
    893 static int	wm_check_mng_mode(struct wm_softc *);
    894 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    895 static int	wm_check_mng_mode_82574(struct wm_softc *);
    896 static int	wm_check_mng_mode_generic(struct wm_softc *);
    897 #endif
    898 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    899 static bool	wm_phy_resetisblocked(struct wm_softc *);
    900 static void	wm_get_hw_control(struct wm_softc *);
    901 static void	wm_release_hw_control(struct wm_softc *);
    902 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    903 static void	wm_smbustopci(struct wm_softc *);
    904 static void	wm_init_manageability(struct wm_softc *);
    905 static void	wm_release_manageability(struct wm_softc *);
    906 static void	wm_get_wakeup(struct wm_softc *);
    907 static void	wm_ulp_disable(struct wm_softc *);
    908 static void	wm_enable_phy_wakeup(struct wm_softc *);
    909 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    910 static void	wm_enable_wakeup(struct wm_softc *);
    911 /* LPLU (Low Power Link Up) */
    912 static void	wm_lplu_d0_disable(struct wm_softc *);
    913 /* EEE */
    914 static void	wm_set_eee_i350(struct wm_softc *);
    915 
    916 /*
    917  * Workarounds (mainly PHY related).
     918  * Most PHY workarounds live in the PHY drivers themselves.
    919  */
    920 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    921 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    922 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    923 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    924 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    925 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    926 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    927 static void	wm_reset_init_script_82575(struct wm_softc *);
    928 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    929 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    930 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    931 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    932 static void	wm_pll_workaround_i210(struct wm_softc *);
    933 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    934 
    935 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    936     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    937 
    938 /*
    939  * Devices supported by this driver.
    940  */
    941 static const struct wm_product {
    942 	pci_vendor_id_t		wmp_vendor;
    943 	pci_product_id_t	wmp_product;
    944 	const char		*wmp_name;
    945 	wm_chip_type		wmp_type;
    946 	uint32_t		wmp_flags;
    947 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    948 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    949 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    950 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    951 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    952 } wm_products[] = {
    953 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    954 	  "Intel i82542 1000BASE-X Ethernet",
    955 	  WM_T_82542_2_1,	WMP_F_FIBER },
    956 
    957 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    958 	  "Intel i82543GC 1000BASE-X Ethernet",
    959 	  WM_T_82543,		WMP_F_FIBER },
    960 
    961 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    962 	  "Intel i82543GC 1000BASE-T Ethernet",
    963 	  WM_T_82543,		WMP_F_COPPER },
    964 
    965 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    966 	  "Intel i82544EI 1000BASE-T Ethernet",
    967 	  WM_T_82544,		WMP_F_COPPER },
    968 
    969 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    970 	  "Intel i82544EI 1000BASE-X Ethernet",
    971 	  WM_T_82544,		WMP_F_FIBER },
    972 
    973 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    974 	  "Intel i82544GC 1000BASE-T Ethernet",
    975 	  WM_T_82544,		WMP_F_COPPER },
    976 
    977 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    978 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    979 	  WM_T_82544,		WMP_F_COPPER },
    980 
    981 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    982 	  "Intel i82540EM 1000BASE-T Ethernet",
    983 	  WM_T_82540,		WMP_F_COPPER },
    984 
    985 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    986 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    987 	  WM_T_82540,		WMP_F_COPPER },
    988 
    989 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    990 	  "Intel i82540EP 1000BASE-T Ethernet",
    991 	  WM_T_82540,		WMP_F_COPPER },
    992 
    993 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    994 	  "Intel i82540EP 1000BASE-T Ethernet",
    995 	  WM_T_82540,		WMP_F_COPPER },
    996 
    997 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    998 	  "Intel i82540EP 1000BASE-T Ethernet",
    999 	  WM_T_82540,		WMP_F_COPPER },
   1000 
   1001 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1002 	  "Intel i82545EM 1000BASE-T Ethernet",
   1003 	  WM_T_82545,		WMP_F_COPPER },
   1004 
   1005 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1006 	  "Intel i82545GM 1000BASE-T Ethernet",
   1007 	  WM_T_82545_3,		WMP_F_COPPER },
   1008 
   1009 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1010 	  "Intel i82545GM 1000BASE-X Ethernet",
   1011 	  WM_T_82545_3,		WMP_F_FIBER },
   1012 
   1013 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1014 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1015 	  WM_T_82545_3,		WMP_F_SERDES },
   1016 
   1017 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1018 	  "Intel i82546EB 1000BASE-T Ethernet",
   1019 	  WM_T_82546,		WMP_F_COPPER },
   1020 
   1021 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1022 	  "Intel i82546EB 1000BASE-T Ethernet",
   1023 	  WM_T_82546,		WMP_F_COPPER },
   1024 
   1025 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1026 	  "Intel i82545EM 1000BASE-X Ethernet",
   1027 	  WM_T_82545,		WMP_F_FIBER },
   1028 
   1029 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1030 	  "Intel i82546EB 1000BASE-X Ethernet",
   1031 	  WM_T_82546,		WMP_F_FIBER },
   1032 
   1033 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1034 	  "Intel i82546GB 1000BASE-T Ethernet",
   1035 	  WM_T_82546_3,		WMP_F_COPPER },
   1036 
   1037 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1038 	  "Intel i82546GB 1000BASE-X Ethernet",
   1039 	  WM_T_82546_3,		WMP_F_FIBER },
   1040 
   1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1042 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1043 	  WM_T_82546_3,		WMP_F_SERDES },
   1044 
   1045 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1046 	  "i82546GB quad-port Gigabit Ethernet",
   1047 	  WM_T_82546_3,		WMP_F_COPPER },
   1048 
   1049 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1050 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1051 	  WM_T_82546_3,		WMP_F_COPPER },
   1052 
   1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1054 	  "Intel PRO/1000MT (82546GB)",
   1055 	  WM_T_82546_3,		WMP_F_COPPER },
   1056 
   1057 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1058 	  "Intel i82541EI 1000BASE-T Ethernet",
   1059 	  WM_T_82541,		WMP_F_COPPER },
   1060 
   1061 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1062 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1063 	  WM_T_82541,		WMP_F_COPPER },
   1064 
   1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1066 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1067 	  WM_T_82541,		WMP_F_COPPER },
   1068 
   1069 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1070 	  "Intel i82541ER 1000BASE-T Ethernet",
   1071 	  WM_T_82541_2,		WMP_F_COPPER },
   1072 
   1073 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1074 	  "Intel i82541GI 1000BASE-T Ethernet",
   1075 	  WM_T_82541_2,		WMP_F_COPPER },
   1076 
   1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1078 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1079 	  WM_T_82541_2,		WMP_F_COPPER },
   1080 
   1081 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1082 	  "Intel i82541PI 1000BASE-T Ethernet",
   1083 	  WM_T_82541_2,		WMP_F_COPPER },
   1084 
   1085 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1086 	  "Intel i82547EI 1000BASE-T Ethernet",
   1087 	  WM_T_82547,		WMP_F_COPPER },
   1088 
   1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1090 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1091 	  WM_T_82547,		WMP_F_COPPER },
   1092 
   1093 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1094 	  "Intel i82547GI 1000BASE-T Ethernet",
   1095 	  WM_T_82547_2,		WMP_F_COPPER },
   1096 
   1097 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1098 	  "Intel PRO/1000 PT (82571EB)",
   1099 	  WM_T_82571,		WMP_F_COPPER },
   1100 
   1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1102 	  "Intel PRO/1000 PF (82571EB)",
   1103 	  WM_T_82571,		WMP_F_FIBER },
   1104 
   1105 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1106 	  "Intel PRO/1000 PB (82571EB)",
   1107 	  WM_T_82571,		WMP_F_SERDES },
   1108 
   1109 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1110 	  "Intel PRO/1000 QT (82571EB)",
   1111 	  WM_T_82571,		WMP_F_COPPER },
   1112 
   1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1114 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1115 	  WM_T_82571,		WMP_F_COPPER, },
   1116 
   1117 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1118 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1119 	  WM_T_82571,		WMP_F_COPPER, },
   1120 
   1121 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1122 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1123 	  WM_T_82571,		WMP_F_SERDES, },
   1124 
   1125 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1126 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1127 	  WM_T_82571,		WMP_F_SERDES, },
   1128 
   1129 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1130 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1131 	  WM_T_82571,		WMP_F_FIBER, },
   1132 
   1133 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1134 	  "Intel i82572EI 1000baseT Ethernet",
   1135 	  WM_T_82572,		WMP_F_COPPER },
   1136 
   1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1138 	  "Intel i82572EI 1000baseX Ethernet",
   1139 	  WM_T_82572,		WMP_F_FIBER },
   1140 
   1141 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1142 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1143 	  WM_T_82572,		WMP_F_SERDES },
   1144 
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1146 	  "Intel i82572EI 1000baseT Ethernet",
   1147 	  WM_T_82572,		WMP_F_COPPER },
   1148 
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1150 	  "Intel i82573E",
   1151 	  WM_T_82573,		WMP_F_COPPER },
   1152 
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1154 	  "Intel i82573E IAMT",
   1155 	  WM_T_82573,		WMP_F_COPPER },
   1156 
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1158 	  "Intel i82573L Gigabit Ethernet",
   1159 	  WM_T_82573,		WMP_F_COPPER },
   1160 
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1162 	  "Intel i82574L",
   1163 	  WM_T_82574,		WMP_F_COPPER },
   1164 
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1166 	  "Intel i82574L",
   1167 	  WM_T_82574,		WMP_F_COPPER },
   1168 
   1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1170 	  "Intel i82583V",
   1171 	  WM_T_82583,		WMP_F_COPPER },
   1172 
   1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1174 	  "i80003 dual 1000baseT Ethernet",
   1175 	  WM_T_80003,		WMP_F_COPPER },
   1176 
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1178 	  "i80003 dual 1000baseX Ethernet",
   1179 	  WM_T_80003,		WMP_F_COPPER },
   1180 
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1182 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1183 	  WM_T_80003,		WMP_F_SERDES },
   1184 
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1186 	  "Intel i80003 1000baseT Ethernet",
   1187 	  WM_T_80003,		WMP_F_COPPER },
   1188 
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1190 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1191 	  WM_T_80003,		WMP_F_SERDES },
   1192 
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1194 	  "Intel i82801H (M_AMT) LAN Controller",
   1195 	  WM_T_ICH8,		WMP_F_COPPER },
   1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1197 	  "Intel i82801H (AMT) LAN Controller",
   1198 	  WM_T_ICH8,		WMP_F_COPPER },
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1200 	  "Intel i82801H LAN Controller",
   1201 	  WM_T_ICH8,		WMP_F_COPPER },
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1203 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1204 	  WM_T_ICH8,		WMP_F_COPPER },
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1206 	  "Intel i82801H (M) LAN Controller",
   1207 	  WM_T_ICH8,		WMP_F_COPPER },
   1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1209 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1210 	  WM_T_ICH8,		WMP_F_COPPER },
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1212 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1213 	  WM_T_ICH8,		WMP_F_COPPER },
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1215 	  "82567V-3 LAN Controller",
   1216 	  WM_T_ICH8,		WMP_F_COPPER },
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1218 	  "82801I (AMT) LAN Controller",
   1219 	  WM_T_ICH9,		WMP_F_COPPER },
   1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1221 	  "82801I 10/100 LAN Controller",
   1222 	  WM_T_ICH9,		WMP_F_COPPER },
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1224 	  "82801I (G) 10/100 LAN Controller",
   1225 	  WM_T_ICH9,		WMP_F_COPPER },
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1227 	  "82801I (GT) 10/100 LAN Controller",
   1228 	  WM_T_ICH9,		WMP_F_COPPER },
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1230 	  "82801I (C) LAN Controller",
   1231 	  WM_T_ICH9,		WMP_F_COPPER },
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1233 	  "82801I mobile LAN Controller",
   1234 	  WM_T_ICH9,		WMP_F_COPPER },
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1236 	  "82801I mobile (V) LAN Controller",
   1237 	  WM_T_ICH9,		WMP_F_COPPER },
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1239 	  "82801I mobile (AMT) LAN Controller",
   1240 	  WM_T_ICH9,		WMP_F_COPPER },
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1242 	  "82567LM-4 LAN Controller",
   1243 	  WM_T_ICH9,		WMP_F_COPPER },
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1245 	  "82567LM-2 LAN Controller",
   1246 	  WM_T_ICH10,		WMP_F_COPPER },
   1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1248 	  "82567LF-2 LAN Controller",
   1249 	  WM_T_ICH10,		WMP_F_COPPER },
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1251 	  "82567LM-3 LAN Controller",
   1252 	  WM_T_ICH10,		WMP_F_COPPER },
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1254 	  "82567LF-3 LAN Controller",
   1255 	  WM_T_ICH10,		WMP_F_COPPER },
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1257 	  "82567V-2 LAN Controller",
   1258 	  WM_T_ICH10,		WMP_F_COPPER },
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1260 	  "82567V-3? LAN Controller",
   1261 	  WM_T_ICH10,		WMP_F_COPPER },
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1263 	  "HANKSVILLE LAN Controller",
   1264 	  WM_T_ICH10,		WMP_F_COPPER },
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1266 	  "PCH LAN (82577LM) Controller",
   1267 	  WM_T_PCH,		WMP_F_COPPER },
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1269 	  "PCH LAN (82577LC) Controller",
   1270 	  WM_T_PCH,		WMP_F_COPPER },
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1272 	  "PCH LAN (82578DM) Controller",
   1273 	  WM_T_PCH,		WMP_F_COPPER },
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1275 	  "PCH LAN (82578DC) Controller",
   1276 	  WM_T_PCH,		WMP_F_COPPER },
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1278 	  "PCH2 LAN (82579LM) Controller",
   1279 	  WM_T_PCH2,		WMP_F_COPPER },
   1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1281 	  "PCH2 LAN (82579V) Controller",
   1282 	  WM_T_PCH2,		WMP_F_COPPER },
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1284 	  "82575EB dual-1000baseT Ethernet",
   1285 	  WM_T_82575,		WMP_F_COPPER },
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1287 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1288 	  WM_T_82575,		WMP_F_SERDES },
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1290 	  "82575GB quad-1000baseT Ethernet",
   1291 	  WM_T_82575,		WMP_F_COPPER },
   1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1293 	  "82575GB quad-1000baseT Ethernet (PM)",
   1294 	  WM_T_82575,		WMP_F_COPPER },
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1296 	  "82576 1000BaseT Ethernet",
   1297 	  WM_T_82576,		WMP_F_COPPER },
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1299 	  "82576 1000BaseX Ethernet",
   1300 	  WM_T_82576,		WMP_F_FIBER },
   1301 
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1303 	  "82576 gigabit Ethernet (SERDES)",
   1304 	  WM_T_82576,		WMP_F_SERDES },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1307 	  "82576 quad-1000BaseT Ethernet",
   1308 	  WM_T_82576,		WMP_F_COPPER },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1311 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1312 	  WM_T_82576,		WMP_F_COPPER },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1315 	  "82576 gigabit Ethernet",
   1316 	  WM_T_82576,		WMP_F_COPPER },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1319 	  "82576 gigabit Ethernet (SERDES)",
   1320 	  WM_T_82576,		WMP_F_SERDES },
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1322 	  "82576 quad-gigabit Ethernet (SERDES)",
   1323 	  WM_T_82576,		WMP_F_SERDES },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1326 	  "82580 1000BaseT Ethernet",
   1327 	  WM_T_82580,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1329 	  "82580 1000BaseX Ethernet",
   1330 	  WM_T_82580,		WMP_F_FIBER },
   1331 
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1333 	  "82580 1000BaseT Ethernet (SERDES)",
   1334 	  WM_T_82580,		WMP_F_SERDES },
   1335 
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1337 	  "82580 gigabit Ethernet (SGMII)",
   1338 	  WM_T_82580,		WMP_F_COPPER },
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1340 	  "82580 dual-1000BaseT Ethernet",
   1341 	  WM_T_82580,		WMP_F_COPPER },
   1342 
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1344 	  "82580 quad-1000BaseX Ethernet",
   1345 	  WM_T_82580,		WMP_F_FIBER },
   1346 
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1348 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1349 	  WM_T_82580,		WMP_F_COPPER },
   1350 
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1352 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1353 	  WM_T_82580,		WMP_F_SERDES },
   1354 
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1356 	  "DH89XXCC 1000BASE-KX Ethernet",
   1357 	  WM_T_82580,		WMP_F_SERDES },
   1358 
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1360 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1361 	  WM_T_82580,		WMP_F_SERDES },
   1362 
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1364 	  "I350 Gigabit Network Connection",
   1365 	  WM_T_I350,		WMP_F_COPPER },
   1366 
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1368 	  "I350 Gigabit Fiber Network Connection",
   1369 	  WM_T_I350,		WMP_F_FIBER },
   1370 
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1372 	  "I350 Gigabit Backplane Connection",
   1373 	  WM_T_I350,		WMP_F_SERDES },
   1374 
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1376 	  "I350 Quad Port Gigabit Ethernet",
   1377 	  WM_T_I350,		WMP_F_SERDES },
   1378 
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1380 	  "I350 Gigabit Connection",
   1381 	  WM_T_I350,		WMP_F_COPPER },
   1382 
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1384 	  "I354 Gigabit Ethernet (KX)",
   1385 	  WM_T_I354,		WMP_F_SERDES },
   1386 
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1388 	  "I354 Gigabit Ethernet (SGMII)",
   1389 	  WM_T_I354,		WMP_F_COPPER },
   1390 
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1392 	  "I354 Gigabit Ethernet (2.5G)",
   1393 	  WM_T_I354,		WMP_F_COPPER },
   1394 
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1396 	  "I210-T1 Ethernet Server Adapter",
   1397 	  WM_T_I210,		WMP_F_COPPER },
   1398 
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1400 	  "I210 Ethernet (Copper OEM)",
   1401 	  WM_T_I210,		WMP_F_COPPER },
   1402 
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1404 	  "I210 Ethernet (Copper IT)",
   1405 	  WM_T_I210,		WMP_F_COPPER },
   1406 
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1408 	  "I210 Ethernet (FLASH less)",
   1409 	  WM_T_I210,		WMP_F_COPPER },
   1410 
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1412 	  "I210 Gigabit Ethernet (Fiber)",
   1413 	  WM_T_I210,		WMP_F_FIBER },
   1414 
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1416 	  "I210 Gigabit Ethernet (SERDES)",
   1417 	  WM_T_I210,		WMP_F_SERDES },
   1418 
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1420 	  "I210 Gigabit Ethernet (FLASH less)",
   1421 	  WM_T_I210,		WMP_F_SERDES },
   1422 
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1424 	  "I210 Gigabit Ethernet (SGMII)",
   1425 	  WM_T_I210,		WMP_F_COPPER },
   1426 
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1428 	  "I211 Ethernet (COPPER)",
   1429 	  WM_T_I211,		WMP_F_COPPER },
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1431 	  "I217 V Ethernet Connection",
   1432 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1434 	  "I217 LM Ethernet Connection",
   1435 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1437 	  "I218 V Ethernet Connection",
   1438 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1439 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1440 	  "I218 V Ethernet Connection",
   1441 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1443 	  "I218 V Ethernet Connection",
   1444 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1446 	  "I218 LM Ethernet Connection",
   1447 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1448 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1449 	  "I218 LM Ethernet Connection",
   1450 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1451 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1452 	  "I218 LM Ethernet Connection",
   1453 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1454 #if 0
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1456 	  "I219 V Ethernet Connection",
   1457 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1459 	  "I219 V Ethernet Connection",
   1460 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1462 	  "I219 V Ethernet Connection",
   1463 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1465 	  "I219 V Ethernet Connection",
   1466 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1467 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1468 	  "I219 LM Ethernet Connection",
   1469 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1471 	  "I219 LM Ethernet Connection",
   1472 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1474 	  "I219 LM Ethernet Connection",
   1475 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1476 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1477 	  "I219 LM Ethernet Connection",
   1478 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1479 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1480 	  "I219 LM Ethernet Connection",
   1481 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1482 #endif
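         	/* Sentinel entry: marks the end of the device table. */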
   1483 	{ 0,			0,
   1484 	  NULL,
   1485 	  0,			0 },
   1486 };
   1487 
    1488 /*
    1489  * Register read/write functions
    1490  * other than CSR_{READ|WRITE}().
    1491  */
   1492 
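         /*
          * wm_io_read() and wm_io_write() use the chip's I/O-mapped
          * indirect access mechanism: the register offset is written
          * through the window at offset 0 of the I/O BAR, and the data
          * is then read or written through the window at offset 4.
          */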
   1493 #if 0 /* Not currently used */
   1494 static inline uint32_t
   1495 wm_io_read(struct wm_softc *sc, int reg)
   1496 {
   1497 
   1498 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1499 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1500 }
   1501 #endif
   1502 
   1503 static inline void
   1504 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1505 {
   1506 
   1507 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1508 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1509 }
   1510 
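         /*
          * Write an 8-bit controller register on 82575-class chips: the
          * data and the target offset are packed into a single register
          * write, and the READY bit is then polled (with a timeout) to
          * wait for the hardware to complete the access.
          */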
   1511 static inline void
   1512 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1513     uint32_t data)
   1514 {
   1515 	uint32_t regval;
   1516 	int i;
   1517 
   1518 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1519 
   1520 	CSR_WRITE(sc, reg, regval);
   1521 
   1522 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1523 		delay(5);
   1524 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1525 			break;
   1526 	}
   1527 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1528 		aprint_error("%s: WARNING:"
   1529 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1530 		    device_xname(sc->sc_dev), reg);
   1531 	}
   1532 }
   1533 
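         /*
          * Load a bus address into the low/high halves of a little-endian
          * descriptor address.  The sizeof() test is a compile-time
          * constant, so the unused branch is optimized away.
          */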
   1534 static inline void
   1535 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1536 {
   1537 	wa->wa_low = htole32(v & 0xffffffffU);
   1538 	if (sizeof(bus_addr_t) == 8)
   1539 		wa->wa_high = htole32((uint64_t) v >> 32);
   1540 	else
   1541 		wa->wa_high = 0;
   1542 }
   1543 
   1544 /*
   1545  * Descriptor sync/init functions.
   1546  */
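         /*
          * Sync "num" Tx descriptors starting at "start".  The descriptor
          * ring is circular, so a range that wraps past the end of the
          * ring must be synced in two pieces.
          */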
   1547 static inline void
   1548 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1549 {
   1550 	struct wm_softc *sc = txq->txq_sc;
   1551 
   1552 	/* If it will wrap around, sync to the end of the ring. */
   1553 	if ((start + num) > WM_NTXDESC(txq)) {
   1554 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1555 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1556 		    (WM_NTXDESC(txq) - start), ops);
   1557 		num -= (WM_NTXDESC(txq) - start);
   1558 		start = 0;
   1559 	}
   1560 
   1561 	/* Now sync whatever is left. */
   1562 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1563 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1564 }
   1565 
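         /* Sync a single Rx descriptor. */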
   1566 static inline void
   1567 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1568 {
   1569 	struct wm_softc *sc = rxq->rxq_sc;
   1570 
   1571 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1572 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1573 }
   1574 
   1575 static inline void
   1576 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1577 {
   1578 	struct wm_softc *sc = rxq->rxq_sc;
   1579 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1580 	struct mbuf *m = rxs->rxs_mbuf;
   1581 
   1582 	/*
   1583 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1584 	 * so that the payload after the Ethernet header is aligned
   1585 	 * to a 4-byte boundary.
    1586 	 *
   1587 	 * XXX BRAINDAMAGE ALERT!
   1588 	 * The stupid chip uses the same size for every buffer, which
   1589 	 * is set in the Receive Control register.  We are using the 2K
   1590 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1591 	 * reason, we can't "scoot" packets longer than the standard
   1592 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1593 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1594 	 * the upper layer copy the headers.
   1595 	 */
   1596 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1597 
   1598 	if (sc->sc_type == WM_T_82574) {
   1599 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1600 		rxd->erx_data.erxd_addr =
   1601 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1602 		rxd->erx_data.erxd_dd = 0;
   1603 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1604 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1605 
   1606 		rxd->nqrx_data.nrxd_paddr =
   1607 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1608 		/* Currently, split header is not supported. */
   1609 		rxd->nqrx_data.nrxd_haddr = 0;
   1610 	} else {
   1611 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1612 
   1613 		wm_set_dma_addr(&rxd->wrx_addr,
   1614 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1615 		rxd->wrx_len = 0;
   1616 		rxd->wrx_cksum = 0;
   1617 		rxd->wrx_status = 0;
   1618 		rxd->wrx_errors = 0;
   1619 		rxd->wrx_special = 0;
   1620 	}
   1621 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1622 
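         	/* Advance the receive descriptor tail so the chip can use this slot. */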
   1623 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1624 }
   1625 
   1626 /*
   1627  * Device driver interface functions and commonly used functions.
   1628  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1629  */
   1630 
   1631 /* Lookup supported device table */
   1632 static const struct wm_product *
   1633 wm_lookup(const struct pci_attach_args *pa)
   1634 {
   1635 	const struct wm_product *wmp;
   1636 
   1637 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1638 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1639 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1640 			return wmp;
   1641 	}
   1642 	return NULL;
   1643 }
   1644 
   1645 /* The match function (ca_match) */
   1646 static int
   1647 wm_match(device_t parent, cfdata_t cf, void *aux)
   1648 {
   1649 	struct pci_attach_args *pa = aux;
   1650 
   1651 	if (wm_lookup(pa) != NULL)
   1652 		return 1;
   1653 
   1654 	return 0;
   1655 }
   1656 
   1657 /* The attach function (ca_attach) */
   1658 static void
   1659 wm_attach(device_t parent, device_t self, void *aux)
   1660 {
   1661 	struct wm_softc *sc = device_private(self);
   1662 	struct pci_attach_args *pa = aux;
   1663 	prop_dictionary_t dict;
   1664 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1665 	pci_chipset_tag_t pc = pa->pa_pc;
   1666 	int counts[PCI_INTR_TYPE_SIZE];
   1667 	pci_intr_type_t max_type;
   1668 	const char *eetype, *xname;
   1669 	bus_space_tag_t memt;
   1670 	bus_space_handle_t memh;
   1671 	bus_size_t memsize;
   1672 	int memh_valid;
   1673 	int i, error;
   1674 	const struct wm_product *wmp;
   1675 	prop_data_t ea;
   1676 	prop_number_t pn;
   1677 	uint8_t enaddr[ETHER_ADDR_LEN];
   1678 	char buf[256];
   1679 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1680 	pcireg_t preg, memtype;
   1681 	uint16_t eeprom_data, apme_mask;
   1682 	bool force_clear_smbi;
   1683 	uint32_t link_mode;
   1684 	uint32_t reg;
   1685 
   1686 	sc->sc_dev = self;
   1687 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1688 	sc->sc_core_stopping = false;
   1689 
   1690 	wmp = wm_lookup(pa);
   1691 #ifdef DIAGNOSTIC
   1692 	if (wmp == NULL) {
   1693 		printf("\n");
   1694 		panic("wm_attach: impossible");
   1695 	}
   1696 #endif
   1697 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1698 
   1699 	sc->sc_pc = pa->pa_pc;
   1700 	sc->sc_pcitag = pa->pa_tag;
   1701 
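         	/* Prefer 64-bit DMA so buffers aren't limited to the low 4GB. */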
   1702 	if (pci_dma64_available(pa))
   1703 		sc->sc_dmat = pa->pa_dmat64;
   1704 	else
   1705 		sc->sc_dmat = pa->pa_dmat;
   1706 
   1707 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1708 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1709 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1710 
   1711 	sc->sc_type = wmp->wmp_type;
   1712 
   1713 	/* Set default function pointers */
   1714 	sc->phy.acquire = wm_get_null;
   1715 	sc->phy.release = wm_put_null;
   1716 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1717 
   1718 	if (sc->sc_type < WM_T_82543) {
   1719 		if (sc->sc_rev < 2) {
   1720 			aprint_error_dev(sc->sc_dev,
   1721 			    "i82542 must be at least rev. 2\n");
   1722 			return;
   1723 		}
   1724 		if (sc->sc_rev < 3)
   1725 			sc->sc_type = WM_T_82542_2_0;
   1726 	}
   1727 
   1728 	/*
   1729 	 * Disable MSI for Errata:
   1730 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1731 	 *
   1732 	 *  82544: Errata 25
   1733 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1734 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1735 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1736 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1737 	 *
   1738 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1739 	 *
   1740 	 *  82571 & 82572: Errata 63
   1741 	 */
   1742 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1743 	    || (sc->sc_type == WM_T_82572))
   1744 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1745 
   1746 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1747 	    || (sc->sc_type == WM_T_82580)
   1748 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1749 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1750 		sc->sc_flags |= WM_F_NEWQUEUE;
   1751 
   1752 	/* Set device properties (mactype) */
   1753 	dict = device_properties(sc->sc_dev);
   1754 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1755 
   1756 	/*
    1757 	 * Map the device.  All devices support memory-mapped access,
   1758 	 * and it is really required for normal operation.
   1759 	 */
   1760 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1761 	switch (memtype) {
   1762 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1763 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1764 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1765 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1766 		break;
   1767 	default:
   1768 		memh_valid = 0;
   1769 		break;
   1770 	}
   1771 
   1772 	if (memh_valid) {
   1773 		sc->sc_st = memt;
   1774 		sc->sc_sh = memh;
   1775 		sc->sc_ss = memsize;
   1776 	} else {
   1777 		aprint_error_dev(sc->sc_dev,
   1778 		    "unable to map device registers\n");
   1779 		return;
   1780 	}
   1781 
   1782 	/*
   1783 	 * In addition, i82544 and later support I/O mapped indirect
   1784 	 * register access.  It is not desirable (nor supported in
   1785 	 * this driver) to use it for normal operation, though it is
   1786 	 * required to work around bugs in some chip versions.
   1787 	 */
   1788 	if (sc->sc_type >= WM_T_82544) {
   1789 		/* First we have to find the I/O BAR. */
   1790 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1791 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1792 			if (memtype == PCI_MAPREG_TYPE_IO)
   1793 				break;
   1794 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1795 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1796 				i += 4;	/* skip high bits, too */
   1797 		}
   1798 		if (i < PCI_MAPREG_END) {
   1799 			/*
    1800 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1801 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1802 			 * That's not a problem, because such newer chips
    1803 			 * don't have this bug.
    1804 			 *
    1805 			 * The i8254x apparently doesn't respond when the
    1806 			 * I/O BAR is 0, which looks as if it hasn't
    1807 			 * been configured.
   1808 			 */
   1809 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1810 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1811 				aprint_error_dev(sc->sc_dev,
   1812 				    "WARNING: I/O BAR at zero.\n");
   1813 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1814 					0, &sc->sc_iot, &sc->sc_ioh,
   1815 					NULL, &sc->sc_ios) == 0) {
   1816 				sc->sc_flags |= WM_F_IOH_VALID;
   1817 			} else {
   1818 				aprint_error_dev(sc->sc_dev,
   1819 				    "WARNING: unable to map I/O space\n");
   1820 			}
   1821 		}
   1822 
   1823 	}
   1824 
   1825 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1826 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1827 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1828 	if (sc->sc_type < WM_T_82542_2_1)
   1829 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1830 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1831 
   1832 	/* power up chip */
   1833 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1834 	    NULL)) && error != EOPNOTSUPP) {
   1835 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1836 		return;
   1837 	}
   1838 
   1839 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1840 
   1841 	/* Allocation settings */
   1842 	max_type = PCI_INTR_TYPE_MSIX;
   1843 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1844 	counts[PCI_INTR_TYPE_MSI] = 1;
   1845 	counts[PCI_INTR_TYPE_INTX] = 1;
   1846 	/* overridden by disable flags */
   1847 	if (wm_disable_msi != 0) {
   1848 		counts[PCI_INTR_TYPE_MSI] = 0;
   1849 		if (wm_disable_msix != 0) {
   1850 			max_type = PCI_INTR_TYPE_INTX;
   1851 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1852 		}
   1853 	} else if (wm_disable_msix != 0) {
   1854 		max_type = PCI_INTR_TYPE_MSI;
   1855 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1856 	}
   1857 
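         	/*
         	 * Interrupt allocation falls back along the chain
         	 * MSI-X -> MSI -> INTx: if establishing handlers for the
         	 * allocated type fails, release it and retry with the next
         	 * weaker type.
         	 */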
   1858 alloc_retry:
   1859 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1860 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1861 		return;
   1862 	}
   1863 
   1864 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1865 		error = wm_setup_msix(sc);
   1866 		if (error) {
   1867 			pci_intr_release(pc, sc->sc_intrs,
   1868 			    counts[PCI_INTR_TYPE_MSIX]);
   1869 
   1870 			/* Setup for MSI: Disable MSI-X */
   1871 			max_type = PCI_INTR_TYPE_MSI;
   1872 			counts[PCI_INTR_TYPE_MSI] = 1;
   1873 			counts[PCI_INTR_TYPE_INTX] = 1;
   1874 			goto alloc_retry;
   1875 		}
    1876 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1877 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1878 		error = wm_setup_legacy(sc);
   1879 		if (error) {
   1880 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1881 			    counts[PCI_INTR_TYPE_MSI]);
   1882 
   1883 			/* The next try is for INTx: Disable MSI */
   1884 			max_type = PCI_INTR_TYPE_INTX;
   1885 			counts[PCI_INTR_TYPE_INTX] = 1;
   1886 			goto alloc_retry;
   1887 		}
   1888 	} else {
   1889 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1890 		error = wm_setup_legacy(sc);
   1891 		if (error) {
   1892 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1893 			    counts[PCI_INTR_TYPE_INTX]);
   1894 			return;
   1895 		}
   1896 	}
   1897 
   1898 	/*
   1899 	 * Check the function ID (unit number of the chip).
   1900 	 */
   1901 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1902 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1903 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1904 	    || (sc->sc_type == WM_T_82580)
   1905 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1906 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1907 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1908 	else
   1909 		sc->sc_funcid = 0;
   1910 
   1911 	/*
   1912 	 * Determine a few things about the bus we're connected to.
   1913 	 */
   1914 	if (sc->sc_type < WM_T_82543) {
   1915 		/* We don't really know the bus characteristics here. */
   1916 		sc->sc_bus_speed = 33;
   1917 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1918 		/*
    1919 		 * CSA (Communication Streaming Architecture) is about as fast
    1920 		 * as a 32-bit 66MHz PCI bus.
   1921 		 */
   1922 		sc->sc_flags |= WM_F_CSA;
   1923 		sc->sc_bus_speed = 66;
   1924 		aprint_verbose_dev(sc->sc_dev,
   1925 		    "Communication Streaming Architecture\n");
   1926 		if (sc->sc_type == WM_T_82547) {
   1927 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1928 			callout_setfunc(&sc->sc_txfifo_ch,
   1929 					wm_82547_txfifo_stall, sc);
   1930 			aprint_verbose_dev(sc->sc_dev,
   1931 			    "using 82547 Tx FIFO stall work-around\n");
   1932 		}
   1933 	} else if (sc->sc_type >= WM_T_82571) {
   1934 		sc->sc_flags |= WM_F_PCIE;
   1935 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1936 		    && (sc->sc_type != WM_T_ICH10)
   1937 		    && (sc->sc_type != WM_T_PCH)
   1938 		    && (sc->sc_type != WM_T_PCH2)
   1939 		    && (sc->sc_type != WM_T_PCH_LPT)
   1940 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1941 			/* ICH* and PCH* have no PCIe capability registers */
   1942 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1943 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1944 				NULL) == 0)
   1945 				aprint_error_dev(sc->sc_dev,
   1946 				    "unable to find PCIe capability\n");
   1947 		}
   1948 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1949 	} else {
   1950 		reg = CSR_READ(sc, WMREG_STATUS);
   1951 		if (reg & STATUS_BUS64)
   1952 			sc->sc_flags |= WM_F_BUS64;
   1953 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1954 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1955 
   1956 			sc->sc_flags |= WM_F_PCIX;
   1957 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1958 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1959 				aprint_error_dev(sc->sc_dev,
   1960 				    "unable to find PCIX capability\n");
   1961 			else if (sc->sc_type != WM_T_82545_3 &&
   1962 				 sc->sc_type != WM_T_82546_3) {
   1963 				/*
   1964 				 * Work around a problem caused by the BIOS
   1965 				 * setting the max memory read byte count
   1966 				 * incorrectly.
   1967 				 */
   1968 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1969 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1970 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1971 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1972 
   1973 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1974 				    PCIX_CMD_BYTECNT_SHIFT;
   1975 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1976 				    PCIX_STATUS_MAXB_SHIFT;
   1977 				if (bytecnt > maxb) {
   1978 					aprint_verbose_dev(sc->sc_dev,
   1979 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1980 					    512 << bytecnt, 512 << maxb);
   1981 					pcix_cmd = (pcix_cmd &
   1982 					    ~PCIX_CMD_BYTECNT_MASK) |
   1983 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1984 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1985 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1986 					    pcix_cmd);
   1987 				}
   1988 			}
   1989 		}
   1990 		/*
   1991 		 * The quad port adapter is special; it has a PCIX-PCIX
   1992 		 * bridge on the board, and can run the secondary bus at
   1993 		 * a higher speed.
   1994 		 */
   1995 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1996 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1997 								      : 66;
   1998 		} else if (sc->sc_flags & WM_F_PCIX) {
   1999 			switch (reg & STATUS_PCIXSPD_MASK) {
   2000 			case STATUS_PCIXSPD_50_66:
   2001 				sc->sc_bus_speed = 66;
   2002 				break;
   2003 			case STATUS_PCIXSPD_66_100:
   2004 				sc->sc_bus_speed = 100;
   2005 				break;
   2006 			case STATUS_PCIXSPD_100_133:
   2007 				sc->sc_bus_speed = 133;
   2008 				break;
   2009 			default:
   2010 				aprint_error_dev(sc->sc_dev,
   2011 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2012 				    reg & STATUS_PCIXSPD_MASK);
   2013 				sc->sc_bus_speed = 66;
   2014 				break;
   2015 			}
   2016 		} else
   2017 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2018 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2019 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2020 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2021 	}
   2022 
   2023 	/* clear interesting stat counters */
   2024 	CSR_READ(sc, WMREG_COLC);
   2025 	CSR_READ(sc, WMREG_RXERRC);
   2026 
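         	/*
         	 * Allocate the mutexes used by the PHY and NVM semaphore
         	 * routines on the chips that require them.
         	 */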
   2027 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2028 	    || (sc->sc_type >= WM_T_ICH8))
   2029 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2030 	if (sc->sc_type >= WM_T_ICH8)
   2031 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2032 
   2033 	/* Set PHY, NVM mutex related stuff */
   2034 	switch (sc->sc_type) {
   2035 	case WM_T_82542_2_0:
   2036 	case WM_T_82542_2_1:
   2037 	case WM_T_82543:
   2038 	case WM_T_82544:
   2039 		/* Microwire */
   2040 		sc->sc_nvm_wordsize = 64;
   2041 		sc->sc_nvm_addrbits = 6;
   2042 		break;
   2043 	case WM_T_82540:
   2044 	case WM_T_82545:
   2045 	case WM_T_82545_3:
   2046 	case WM_T_82546:
   2047 	case WM_T_82546_3:
   2048 		/* Microwire */
   2049 		reg = CSR_READ(sc, WMREG_EECD);
   2050 		if (reg & EECD_EE_SIZE) {
   2051 			sc->sc_nvm_wordsize = 256;
   2052 			sc->sc_nvm_addrbits = 8;
   2053 		} else {
   2054 			sc->sc_nvm_wordsize = 64;
   2055 			sc->sc_nvm_addrbits = 6;
   2056 		}
   2057 		sc->sc_flags |= WM_F_LOCK_EECD;
   2058 		break;
   2059 	case WM_T_82541:
   2060 	case WM_T_82541_2:
   2061 	case WM_T_82547:
   2062 	case WM_T_82547_2:
   2063 		sc->sc_flags |= WM_F_LOCK_EECD;
   2064 		reg = CSR_READ(sc, WMREG_EECD);
   2065 		if (reg & EECD_EE_TYPE) {
   2066 			/* SPI */
   2067 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2068 			wm_nvm_set_addrbits_size_eecd(sc);
   2069 		} else {
   2070 			/* Microwire */
   2071 			if ((reg & EECD_EE_ABITS) != 0) {
   2072 				sc->sc_nvm_wordsize = 256;
   2073 				sc->sc_nvm_addrbits = 8;
   2074 			} else {
   2075 				sc->sc_nvm_wordsize = 64;
   2076 				sc->sc_nvm_addrbits = 6;
   2077 			}
   2078 		}
   2079 		break;
   2080 	case WM_T_82571:
   2081 	case WM_T_82572:
   2082 		/* SPI */
   2083 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2084 		wm_nvm_set_addrbits_size_eecd(sc);
   2085 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2086 		sc->phy.acquire = wm_get_swsm_semaphore;
   2087 		sc->phy.release = wm_put_swsm_semaphore;
   2088 		break;
   2089 	case WM_T_82573:
   2090 	case WM_T_82574:
   2091 	case WM_T_82583:
   2092 		if (sc->sc_type == WM_T_82573) {
   2093 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2094 			sc->phy.acquire = wm_get_swsm_semaphore;
   2095 			sc->phy.release = wm_put_swsm_semaphore;
   2096 		} else {
   2097 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2098 			/* Both PHY and NVM use the same semaphore. */
    2099 			sc->phy.acquire =
    2100 			    wm_get_swfwhw_semaphore;
    2101 			sc->phy.release =
    2102 			    wm_put_swfwhw_semaphore;
   2103 		}
   2104 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2105 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2106 			sc->sc_nvm_wordsize = 2048;
   2107 		} else {
   2108 			/* SPI */
   2109 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2110 			wm_nvm_set_addrbits_size_eecd(sc);
   2111 		}
   2112 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2113 		break;
   2114 	case WM_T_82575:
   2115 	case WM_T_82576:
   2116 	case WM_T_82580:
   2117 	case WM_T_I350:
   2118 	case WM_T_I354:
   2119 	case WM_T_80003:
   2120 		/* SPI */
   2121 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2122 		wm_nvm_set_addrbits_size_eecd(sc);
   2123 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2124 		    | WM_F_LOCK_SWSM;
   2125 		sc->phy.acquire = wm_get_phy_82575;
   2126 		sc->phy.release = wm_put_phy_82575;
   2127 		break;
   2128 	case WM_T_ICH8:
   2129 	case WM_T_ICH9:
   2130 	case WM_T_ICH10:
   2131 	case WM_T_PCH:
   2132 	case WM_T_PCH2:
   2133 	case WM_T_PCH_LPT:
   2134 		/* FLASH */
   2135 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2136 		sc->sc_nvm_wordsize = 2048;
   2137 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2138 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2139 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2140 			aprint_error_dev(sc->sc_dev,
   2141 			    "can't map FLASH registers\n");
   2142 			goto out;
   2143 		}
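         		/*
         		 * GFPREG holds the base and (in its upper bits) the
         		 * limit of the NVM region in units of flash sectors.
         		 * The byte size computed from it is divided by two for
         		 * the two NVM banks and again by two to convert it to
         		 * 16-bit words.
         		 */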
   2144 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2145 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2146 		    ICH_FLASH_SECTOR_SIZE;
   2147 		sc->sc_ich8_flash_bank_size =
   2148 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2149 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2150 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2151 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2152 		sc->sc_flashreg_offset = 0;
   2153 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2154 		sc->phy.release = wm_put_swflag_ich8lan;
   2155 		break;
   2156 	case WM_T_PCH_SPT:
   2157 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2158 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2159 		sc->sc_flasht = sc->sc_st;
   2160 		sc->sc_flashh = sc->sc_sh;
   2161 		sc->sc_ich8_flash_base = 0;
   2162 		sc->sc_nvm_wordsize =
   2163 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2164 			* NVM_SIZE_MULTIPLIER;
    2165 		/* That is the size in bytes; we want it in words */
   2166 		sc->sc_nvm_wordsize /= 2;
   2167 		/* assume 2 banks */
   2168 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2169 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2170 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2171 		sc->phy.release = wm_put_swflag_ich8lan;
   2172 		break;
   2173 	case WM_T_I210:
   2174 	case WM_T_I211:
   2175 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2176 			wm_nvm_set_addrbits_size_eecd(sc);
   2177 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2178 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2179 		} else {
   2180 			sc->sc_nvm_wordsize = INVM_SIZE;
   2181 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2182 		}
   2183 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2184 		sc->phy.acquire = wm_get_phy_82575;
   2185 		sc->phy.release = wm_put_phy_82575;
   2186 		break;
   2187 	default:
   2188 		break;
   2189 	}
   2190 
   2191 	/* Reset the chip to a known state. */
   2192 	wm_reset(sc);
   2193 
   2194 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2195 	switch (sc->sc_type) {
   2196 	case WM_T_82571:
   2197 	case WM_T_82572:
   2198 		reg = CSR_READ(sc, WMREG_SWSM2);
   2199 		if ((reg & SWSM2_LOCK) == 0) {
   2200 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2201 			force_clear_smbi = true;
   2202 		} else
   2203 			force_clear_smbi = false;
   2204 		break;
   2205 	case WM_T_82573:
   2206 	case WM_T_82574:
   2207 	case WM_T_82583:
   2208 		force_clear_smbi = true;
   2209 		break;
   2210 	default:
   2211 		force_clear_smbi = false;
   2212 		break;
   2213 	}
   2214 	if (force_clear_smbi) {
   2215 		reg = CSR_READ(sc, WMREG_SWSM);
   2216 		if ((reg & SWSM_SMBI) != 0)
   2217 			aprint_error_dev(sc->sc_dev,
   2218 			    "Please update the Bootagent\n");
   2219 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2220 	}
   2221 
   2222 	/*
    2223 	 * Defer printing the EEPROM type until after verifying the checksum.
   2224 	 * This allows the EEPROM type to be printed correctly in the case
   2225 	 * that no EEPROM is attached.
   2226 	 */
   2227 	/*
   2228 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2229 	 * this for later, so we can fail future reads from the EEPROM.
   2230 	 */
   2231 	if (wm_nvm_validate_checksum(sc)) {
   2232 		/*
   2233 		 * Read twice again because some PCI-e parts fail the
   2234 		 * first check due to the link being in sleep state.
   2235 		 */
   2236 		if (wm_nvm_validate_checksum(sc))
   2237 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2238 	}
   2239 
   2240 	/* Set device properties (macflags) */
   2241 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2242 
   2243 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2244 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2245 	else {
   2246 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2247 		    sc->sc_nvm_wordsize);
   2248 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2249 			aprint_verbose("iNVM");
   2250 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2251 			aprint_verbose("FLASH(HW)");
   2252 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2253 			aprint_verbose("FLASH");
   2254 		else {
   2255 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2256 				eetype = "SPI";
   2257 			else
   2258 				eetype = "MicroWire";
   2259 			aprint_verbose("(%d address bits) %s EEPROM",
   2260 			    sc->sc_nvm_addrbits, eetype);
   2261 		}
   2262 	}
   2263 	wm_nvm_version(sc);
   2264 	aprint_verbose("\n");
   2265 
   2266 	/* Check for I21[01] PLL workaround */
   2267 	if (sc->sc_type == WM_T_I210)
   2268 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2269 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2270 		/* NVM image release 3.25 has a workaround */
   2271 		if ((sc->sc_nvm_ver_major < 3)
   2272 		    || ((sc->sc_nvm_ver_major == 3)
   2273 			&& (sc->sc_nvm_ver_minor < 25))) {
   2274 			aprint_verbose_dev(sc->sc_dev,
   2275 			    "ROM image version %d.%d is older than 3.25\n",
   2276 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2277 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2278 		}
   2279 	}
   2280 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2281 		wm_pll_workaround_i210(sc);
   2282 
   2283 	wm_get_wakeup(sc);
   2284 
   2285 	/* Non-AMT based hardware can now take control from firmware */
   2286 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2287 		wm_get_hw_control(sc);
   2288 
   2289 	/*
   2290 	 * Read the Ethernet address from the EEPROM, if not first found
   2291 	 * in device properties.
   2292 	 */
   2293 	ea = prop_dictionary_get(dict, "mac-address");
   2294 	if (ea != NULL) {
   2295 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2296 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2297 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2298 	} else {
   2299 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2300 			aprint_error_dev(sc->sc_dev,
   2301 			    "unable to read Ethernet address\n");
   2302 			goto out;
   2303 		}
   2304 	}
   2305 
   2306 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2307 	    ether_sprintf(enaddr));
   2308 
   2309 	/*
   2310 	 * Read the config info from the EEPROM, and set up various
   2311 	 * bits in the control registers based on their contents.
   2312 	 */
   2313 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2314 	if (pn != NULL) {
   2315 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2316 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2317 	} else {
   2318 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2319 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2320 			goto out;
   2321 		}
   2322 	}
   2323 
   2324 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2325 	if (pn != NULL) {
   2326 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2327 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2328 	} else {
   2329 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2330 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2331 			goto out;
   2332 		}
   2333 	}
   2334 
   2335 	/* check for WM_F_WOL */
   2336 	switch (sc->sc_type) {
   2337 	case WM_T_82542_2_0:
   2338 	case WM_T_82542_2_1:
   2339 	case WM_T_82543:
   2340 		/* dummy? */
   2341 		eeprom_data = 0;
   2342 		apme_mask = NVM_CFG3_APME;
   2343 		break;
   2344 	case WM_T_82544:
   2345 		apme_mask = NVM_CFG2_82544_APM_EN;
   2346 		eeprom_data = cfg2;
   2347 		break;
   2348 	case WM_T_82546:
   2349 	case WM_T_82546_3:
   2350 	case WM_T_82571:
   2351 	case WM_T_82572:
   2352 	case WM_T_82573:
   2353 	case WM_T_82574:
   2354 	case WM_T_82583:
   2355 	case WM_T_80003:
   2356 	default:
   2357 		apme_mask = NVM_CFG3_APME;
   2358 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2359 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2360 		break;
   2361 	case WM_T_82575:
   2362 	case WM_T_82576:
   2363 	case WM_T_82580:
   2364 	case WM_T_I350:
   2365 	case WM_T_I354: /* XXX ok? */
   2366 	case WM_T_ICH8:
   2367 	case WM_T_ICH9:
   2368 	case WM_T_ICH10:
   2369 	case WM_T_PCH:
   2370 	case WM_T_PCH2:
   2371 	case WM_T_PCH_LPT:
   2372 	case WM_T_PCH_SPT:
   2373 		/* XXX The funcid should be checked on some devices */
   2374 		apme_mask = WUC_APME;
   2375 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2376 		break;
   2377 	}
   2378 
   2379 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2380 	if ((eeprom_data & apme_mask) != 0)
   2381 		sc->sc_flags |= WM_F_WOL;
   2382 
   2383 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2384 		/* Check NVM for autonegotiation */
   2385 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2386 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2387 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2388 		}
   2389 	}
   2390 
   2391 	/*
    2392 	 * XXX need special handling for some multiple-port cards
    2393 	 * to disable a particular port.
   2394 	 */
   2395 
   2396 	if (sc->sc_type >= WM_T_82544) {
   2397 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2398 		if (pn != NULL) {
   2399 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2400 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2401 		} else {
   2402 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2403 				aprint_error_dev(sc->sc_dev,
   2404 				    "unable to read SWDPIN\n");
   2405 				goto out;
   2406 			}
   2407 		}
   2408 	}
   2409 
   2410 	if (cfg1 & NVM_CFG1_ILOS)
   2411 		sc->sc_ctrl |= CTRL_ILOS;
   2412 
   2413 	/*
   2414 	 * XXX
    2415 	 * This code isn't correct because pins 2 and 3 are located
    2416 	 * at different positions on newer chips. Check all datasheets.
    2417 	 *
    2418 	 * Until this problem is resolved, only apply it to chips <= 82580.
   2419 	 */
   2420 	if (sc->sc_type <= WM_T_82580) {
   2421 		if (sc->sc_type >= WM_T_82544) {
   2422 			sc->sc_ctrl |=
   2423 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2424 			    CTRL_SWDPIO_SHIFT;
   2425 			sc->sc_ctrl |=
   2426 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2427 			    CTRL_SWDPINS_SHIFT;
   2428 		} else {
   2429 			sc->sc_ctrl |=
   2430 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2431 			    CTRL_SWDPIO_SHIFT;
   2432 		}
   2433 	}
   2434 
   2435 	/* XXX For other than 82580? */
   2436 	if (sc->sc_type == WM_T_82580) {
   2437 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2438 		if (nvmword & __BIT(13))
   2439 			sc->sc_ctrl |= CTRL_ILOS;
   2440 	}
   2441 
   2442 #if 0
   2443 	if (sc->sc_type >= WM_T_82544) {
   2444 		if (cfg1 & NVM_CFG1_IPS0)
   2445 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2446 		if (cfg1 & NVM_CFG1_IPS1)
   2447 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2448 		sc->sc_ctrl_ext |=
   2449 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2450 		    CTRL_EXT_SWDPIO_SHIFT;
   2451 		sc->sc_ctrl_ext |=
   2452 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2453 		    CTRL_EXT_SWDPINS_SHIFT;
   2454 	} else {
   2455 		sc->sc_ctrl_ext |=
   2456 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2457 		    CTRL_EXT_SWDPIO_SHIFT;
   2458 	}
   2459 #endif
   2460 
   2461 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2462 #if 0
   2463 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2464 #endif
   2465 
   2466 	if (sc->sc_type == WM_T_PCH) {
   2467 		uint16_t val;
   2468 
   2469 		/* Save the NVM K1 bit setting */
   2470 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2471 
   2472 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2473 			sc->sc_nvm_k1_enabled = 1;
   2474 		else
   2475 			sc->sc_nvm_k1_enabled = 0;
   2476 	}
   2477 
   2478 	/*
    2479 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize
   2480 	 * media structures accordingly.
   2481 	 */
   2482 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2483 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2484 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2485 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2486 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2487 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2488 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2489 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2490 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2491 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2492 	    || (sc->sc_type == WM_T_I211)) {
   2493 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2494 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2495 		switch (link_mode) {
   2496 		case CTRL_EXT_LINK_MODE_1000KX:
   2497 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2498 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2499 			break;
   2500 		case CTRL_EXT_LINK_MODE_SGMII:
   2501 			if (wm_sgmii_uses_mdio(sc)) {
   2502 				aprint_verbose_dev(sc->sc_dev,
   2503 				    "SGMII(MDIO)\n");
   2504 				sc->sc_flags |= WM_F_SGMII;
   2505 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2506 				break;
   2507 			}
   2508 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2509 			/*FALLTHROUGH*/
   2510 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2511 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2512 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2513 				if (link_mode
   2514 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2515 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2516 					sc->sc_flags |= WM_F_SGMII;
   2517 				} else {
   2518 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2519 					aprint_verbose_dev(sc->sc_dev,
   2520 					    "SERDES\n");
   2521 				}
   2522 				break;
   2523 			}
   2524 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2525 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2526 
   2527 			/* Change current link mode setting */
   2528 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2529 			switch (sc->sc_mediatype) {
   2530 			case WM_MEDIATYPE_COPPER:
   2531 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2532 				break;
   2533 			case WM_MEDIATYPE_SERDES:
   2534 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2535 				break;
   2536 			default:
   2537 				break;
   2538 			}
   2539 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2540 			break;
   2541 		case CTRL_EXT_LINK_MODE_GMII:
   2542 		default:
   2543 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2544 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2545 			break;
   2546 		}
   2547 
    2548 		/* Enable the I2C interface only when SGMII is in use */
   2549 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2550 			reg |= CTRL_EXT_I2C_ENA;
   2551 		else
   2552 			reg &= ~CTRL_EXT_I2C_ENA;
   2553 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2554 
   2555 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2556 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2557 		else
   2558 			wm_tbi_mediainit(sc);
   2559 	} else if (sc->sc_type < WM_T_82543 ||
   2560 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2561 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2562 			aprint_error_dev(sc->sc_dev,
   2563 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2564 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2565 		}
   2566 		wm_tbi_mediainit(sc);
   2567 	} else {
   2568 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2569 			aprint_error_dev(sc->sc_dev,
   2570 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2571 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2572 		}
   2573 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2574 	}
   2575 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2576 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2577 
   2578 	ifp = &sc->sc_ethercom.ec_if;
   2579 	xname = device_xname(sc->sc_dev);
   2580 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2581 	ifp->if_softc = sc;
   2582 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2583 #ifdef WM_MPSAFE
   2584 	ifp->if_extflags = IFEF_START_MPSAFE;
   2585 #endif
   2586 	ifp->if_ioctl = wm_ioctl;
   2587 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2588 		ifp->if_start = wm_nq_start;
   2589 		/*
   2590 		 * When the number of CPUs is one and the controller can use
    2591 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2592 		 * That is, wm(4) uses two interrupts: one for Tx/Rx
    2593 		 * and the other for link status changes.
   2594 		 * In this situation, wm_nq_transmit() is disadvantageous
   2595 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2596 		 */
   2597 		if (wm_is_using_multiqueue(sc))
   2598 			ifp->if_transmit = wm_nq_transmit;
   2599 	} else {
   2600 		ifp->if_start = wm_start;
   2601 		/*
    2602 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2603 		 */
   2604 		if (wm_is_using_multiqueue(sc))
   2605 			ifp->if_transmit = wm_transmit;
   2606 	}
   2607 	ifp->if_watchdog = wm_watchdog;
   2608 	ifp->if_init = wm_init;
   2609 	ifp->if_stop = wm_stop;
   2610 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2611 	IFQ_SET_READY(&ifp->if_snd);
   2612 
   2613 	/* Check for jumbo frame */
   2614 	switch (sc->sc_type) {
   2615 	case WM_T_82573:
   2616 		/* XXX limited to 9234 if ASPM is disabled */
   2617 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2618 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2619 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2620 		break;
   2621 	case WM_T_82571:
   2622 	case WM_T_82572:
   2623 	case WM_T_82574:
   2624 	case WM_T_82575:
   2625 	case WM_T_82576:
   2626 	case WM_T_82580:
   2627 	case WM_T_I350:
    2628 	case WM_T_I354: /* XXX ok? */
   2629 	case WM_T_I210:
   2630 	case WM_T_I211:
   2631 	case WM_T_80003:
   2632 	case WM_T_ICH9:
   2633 	case WM_T_ICH10:
   2634 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2635 	case WM_T_PCH_LPT:
   2636 	case WM_T_PCH_SPT:
   2637 		/* XXX limited to 9234 */
   2638 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2639 		break;
   2640 	case WM_T_PCH:
   2641 		/* XXX limited to 4096 */
   2642 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2643 		break;
   2644 	case WM_T_82542_2_0:
   2645 	case WM_T_82542_2_1:
   2646 	case WM_T_82583:
   2647 	case WM_T_ICH8:
   2648 		/* No support for jumbo frame */
   2649 		break;
   2650 	default:
   2651 		/* ETHER_MAX_LEN_JUMBO */
   2652 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2653 		break;
   2654 	}
   2655 
    2656 	/* If we're an i82543 or greater, we can support VLANs. */
   2657 	if (sc->sc_type >= WM_T_82543)
   2658 		sc->sc_ethercom.ec_capabilities |=
   2659 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2660 
   2661 	/*
    2662 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2663 	 * on i82543 and later.
   2664 	 */
   2665 	if (sc->sc_type >= WM_T_82543) {
   2666 		ifp->if_capabilities |=
   2667 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2668 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2669 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2670 		    IFCAP_CSUM_TCPv6_Tx |
   2671 		    IFCAP_CSUM_UDPv6_Tx;
   2672 	}
   2673 
   2674 	/*
   2675 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2676 	 *
   2677 	 *	82541GI (8086:1076) ... no
   2678 	 *	82572EI (8086:10b9) ... yes
   2679 	 */
   2680 	if (sc->sc_type >= WM_T_82571) {
   2681 		ifp->if_capabilities |=
   2682 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2683 	}
   2684 
   2685 	/*
    2686 	 * If we're an i82544 or greater (except i82547), we can do
   2687 	 * TCP segmentation offload.
   2688 	 */
   2689 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2690 		ifp->if_capabilities |= IFCAP_TSOv4;
   2691 	}
   2692 
   2693 	if (sc->sc_type >= WM_T_82571) {
   2694 		ifp->if_capabilities |= IFCAP_TSOv6;
   2695 	}
   2696 
   2697 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2698 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2699 
   2700 #ifdef WM_MPSAFE
   2701 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2702 #else
   2703 	sc->sc_core_lock = NULL;
   2704 #endif
   2705 
   2706 	/* Attach the interface. */
   2707 	if_initialize(ifp);
   2708 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2709 	ether_ifattach(ifp, enaddr);
   2710 	if_register(ifp);
   2711 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2712 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2713 			  RND_FLAG_DEFAULT);
   2714 
   2715 #ifdef WM_EVENT_COUNTERS
   2716 	/* Attach event counters. */
   2717 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2718 	    NULL, xname, "linkintr");
   2719 
   2720 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2721 	    NULL, xname, "tx_xoff");
   2722 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2723 	    NULL, xname, "tx_xon");
   2724 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2725 	    NULL, xname, "rx_xoff");
   2726 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2727 	    NULL, xname, "rx_xon");
   2728 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2729 	    NULL, xname, "rx_macctl");
   2730 #endif /* WM_EVENT_COUNTERS */
   2731 
   2732 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2733 		pmf_class_network_register(self, ifp);
   2734 	else
   2735 		aprint_error_dev(self, "couldn't establish power handler\n");
   2736 
   2737 	sc->sc_flags |= WM_F_ATTACHED;
   2738  out:
   2739 	return;
   2740 }
   2741 
   2742 /* The detach function (ca_detach) */
   2743 static int
   2744 wm_detach(device_t self, int flags __unused)
   2745 {
   2746 	struct wm_softc *sc = device_private(self);
   2747 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2748 	int i;
   2749 
   2750 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2751 		return 0;
   2752 
   2753 	/* Stop the interface. Callouts are stopped in it. */
   2754 	wm_stop(ifp, 1);
   2755 
   2756 	pmf_device_deregister(self);
   2757 
   2758 #ifdef WM_EVENT_COUNTERS
   2759 	evcnt_detach(&sc->sc_ev_linkintr);
   2760 
   2761 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2762 	evcnt_detach(&sc->sc_ev_tx_xon);
   2763 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2764 	evcnt_detach(&sc->sc_ev_rx_xon);
   2765 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2766 #endif /* WM_EVENT_COUNTERS */
   2767 
   2768 	/* Tell the firmware about the release */
   2769 	WM_CORE_LOCK(sc);
   2770 	wm_release_manageability(sc);
   2771 	wm_release_hw_control(sc);
   2772 	wm_enable_wakeup(sc);
   2773 	WM_CORE_UNLOCK(sc);
   2774 
   2775 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2776 
   2777 	/* Delete all remaining media. */
   2778 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2779 
   2780 	ether_ifdetach(ifp);
   2781 	if_detach(ifp);
   2782 	if_percpuq_destroy(sc->sc_ipq);
   2783 
   2784 	/* Unload RX dmamaps and free mbufs */
   2785 	for (i = 0; i < sc->sc_nqueues; i++) {
   2786 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2787 		mutex_enter(rxq->rxq_lock);
   2788 		wm_rxdrain(rxq);
   2789 		mutex_exit(rxq->rxq_lock);
   2790 	}
    2791 	/* The rxq locks must not be held beyond this point */
   2792 
   2793 	/* Disestablish the interrupt handler */
   2794 	for (i = 0; i < sc->sc_nintrs; i++) {
   2795 		if (sc->sc_ihs[i] != NULL) {
   2796 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2797 			sc->sc_ihs[i] = NULL;
   2798 		}
   2799 	}
   2800 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2801 
   2802 	wm_free_txrx_queues(sc);
   2803 
   2804 	/* Unmap the registers */
   2805 	if (sc->sc_ss) {
   2806 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2807 		sc->sc_ss = 0;
   2808 	}
   2809 	if (sc->sc_ios) {
   2810 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2811 		sc->sc_ios = 0;
   2812 	}
   2813 	if (sc->sc_flashs) {
   2814 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2815 		sc->sc_flashs = 0;
   2816 	}
   2817 
   2818 	if (sc->sc_core_lock)
   2819 		mutex_obj_free(sc->sc_core_lock);
   2820 	if (sc->sc_ich_phymtx)
   2821 		mutex_obj_free(sc->sc_ich_phymtx);
   2822 	if (sc->sc_ich_nvmmtx)
   2823 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2824 
   2825 	return 0;
   2826 }
   2827 
   2828 static bool
   2829 wm_suspend(device_t self, const pmf_qual_t *qual)
   2830 {
   2831 	struct wm_softc *sc = device_private(self);
   2832 
   2833 	wm_release_manageability(sc);
   2834 	wm_release_hw_control(sc);
   2835 	wm_enable_wakeup(sc);
   2836 
   2837 	return true;
   2838 }
   2839 
   2840 static bool
   2841 wm_resume(device_t self, const pmf_qual_t *qual)
   2842 {
   2843 	struct wm_softc *sc = device_private(self);
   2844 
   2845 	wm_init_manageability(sc);
   2846 
   2847 	return true;
   2848 }
   2849 
   2850 /*
   2851  * wm_watchdog:		[ifnet interface function]
   2852  *
   2853  *	Watchdog timer handler.
   2854  */
   2855 static void
   2856 wm_watchdog(struct ifnet *ifp)
   2857 {
   2858 	int qid;
   2859 	struct wm_softc *sc = ifp->if_softc;
   2860 
   2861 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2862 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2863 
   2864 		wm_watchdog_txq(ifp, txq);
   2865 	}
   2866 
   2867 	/* Reset the interface. */
   2868 	(void) wm_init(ifp);
   2869 
    2870 	/*
    2871 	 * Some upper layer processing, e.g. ALTQ or a single-CPU system,
    2872 	 * may still call ifp->if_start() directly.
    2873 	 */
   2874 	/* Try to get more packets going. */
   2875 	ifp->if_start(ifp);
   2876 }
   2877 
   2878 static void
   2879 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2880 {
   2881 	struct wm_softc *sc = ifp->if_softc;
   2882 
   2883 	/*
   2884 	 * Since we're using delayed interrupts, sweep up
   2885 	 * before we report an error.
   2886 	 */
   2887 	mutex_enter(txq->txq_lock);
   2888 	wm_txeof(sc, txq);
   2889 	mutex_exit(txq->txq_lock);
   2890 
   2891 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2892 #ifdef WM_DEBUG
   2893 		int i, j;
   2894 		struct wm_txsoft *txs;
   2895 #endif
   2896 		log(LOG_ERR,
   2897 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2898 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2899 		    txq->txq_next);
   2900 		ifp->if_oerrors++;
   2901 #ifdef WM_DEBUG
    2902 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2903 		    i = WM_NEXTTXS(txq, i)) {
    2904 			txs = &txq->txq_soft[i];
    2905 			printf("txs %d tx %d -> %d\n",
    2906 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2907 			for (j = txs->txs_firstdesc; ;
    2908 			    j = WM_NEXTTX(txq, j)) {
    2909 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2910 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2911 				printf("\t %#08x%08x\n",
    2912 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2913 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2914 				if (j == txs->txs_lastdesc)
    2915 					break;
    2916 			}
    2917 		}
   2918 #endif
   2919 	}
   2920 }
   2921 
   2922 /*
   2923  * wm_tick:
   2924  *
   2925  *	One second timer, used to check link status, sweep up
   2926  *	completed transmit jobs, etc.
   2927  */
   2928 static void
   2929 wm_tick(void *arg)
   2930 {
   2931 	struct wm_softc *sc = arg;
   2932 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2933 #ifndef WM_MPSAFE
   2934 	int s = splnet();
   2935 #endif
   2936 
   2937 	WM_CORE_LOCK(sc);
   2938 
   2939 	if (sc->sc_core_stopping)
   2940 		goto out;
   2941 
   2942 	if (sc->sc_type >= WM_T_82542_2_1) {
   2943 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2944 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2945 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2946 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2947 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2948 	}
   2949 
   2950 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2951 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2952 	    + CSR_READ(sc, WMREG_CRCERRS)
   2953 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2954 	    + CSR_READ(sc, WMREG_SYMERRC)
   2955 	    + CSR_READ(sc, WMREG_RXERRC)
   2956 	    + CSR_READ(sc, WMREG_SEC)
   2957 	    + CSR_READ(sc, WMREG_CEXTERR)
   2958 	    + CSR_READ(sc, WMREG_RLEC);
    2959 	/*
    2960 	 * WMREG_RNBC is incremented when there are no available buffers in
    2961 	 * host memory. It is not the number of dropped packets, because the
    2962 	 * Ethernet controller can still receive packets in that case as long
    2963 	 * as there is space in the PHY's FIFO.
    2964 	 *
    2965 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT of your own
    2966 	 * instead of if_iqdrops.
    2967 	 */
   2968 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2969 
   2970 	if (sc->sc_flags & WM_F_HAS_MII)
   2971 		mii_tick(&sc->sc_mii);
   2972 	else if ((sc->sc_type >= WM_T_82575)
   2973 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2974 		wm_serdes_tick(sc);
   2975 	else
   2976 		wm_tbi_tick(sc);
   2977 
   2978 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2979 out:
   2980 	WM_CORE_UNLOCK(sc);
   2981 #ifndef WM_MPSAFE
   2982 	splx(s);
   2983 #endif
   2984 }
   2985 
   2986 static int
   2987 wm_ifflags_cb(struct ethercom *ec)
   2988 {
   2989 	struct ifnet *ifp = &ec->ec_if;
   2990 	struct wm_softc *sc = ifp->if_softc;
   2991 	int rc = 0;
   2992 
   2993 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2994 		device_xname(sc->sc_dev), __func__));
   2995 
   2996 	WM_CORE_LOCK(sc);
   2997 
   2998 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2999 	sc->sc_if_flags = ifp->if_flags;
   3000 
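	/*
	 * Returning ENETRESET tells the caller to reinitialize the
	 * interface; only PROMISC/ALLMULTI (and VLAN) changes are handled
	 * below without a full reset.
	 */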
   3001 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3002 		rc = ENETRESET;
   3003 		goto out;
   3004 	}
   3005 
   3006 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3007 		wm_set_filter(sc);
   3008 
   3009 	wm_set_vlan(sc);
   3010 
   3011 out:
   3012 	WM_CORE_UNLOCK(sc);
   3013 
   3014 	return rc;
   3015 }
   3016 
   3017 /*
   3018  * wm_ioctl:		[ifnet interface function]
   3019  *
   3020  *	Handle control requests from the operator.
   3021  */
   3022 static int
   3023 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3024 {
   3025 	struct wm_softc *sc = ifp->if_softc;
   3026 	struct ifreq *ifr = (struct ifreq *) data;
   3027 	struct ifaddr *ifa = (struct ifaddr *)data;
   3028 	struct sockaddr_dl *sdl;
   3029 	int s, error;
   3030 
   3031 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3032 		device_xname(sc->sc_dev), __func__));
   3033 
   3034 #ifndef WM_MPSAFE
   3035 	s = splnet();
   3036 #endif
   3037 	switch (cmd) {
   3038 	case SIOCSIFMEDIA:
   3039 	case SIOCGIFMEDIA:
   3040 		WM_CORE_LOCK(sc);
   3041 		/* Flow control requires full-duplex mode. */
   3042 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3043 		    (ifr->ifr_media & IFM_FDX) == 0)
   3044 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3045 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3046 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3047 				/* We can do both TXPAUSE and RXPAUSE. */
   3048 				ifr->ifr_media |=
   3049 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3050 			}
   3051 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3052 		}
   3053 		WM_CORE_UNLOCK(sc);
   3054 #ifdef WM_MPSAFE
   3055 		s = splnet();
   3056 #endif
   3057 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3058 #ifdef WM_MPSAFE
   3059 		splx(s);
   3060 #endif
   3061 		break;
   3062 	case SIOCINITIFADDR:
   3063 		WM_CORE_LOCK(sc);
   3064 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3065 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3066 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3067 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3068 			/* unicast address is first multicast entry */
   3069 			wm_set_filter(sc);
   3070 			error = 0;
   3071 			WM_CORE_UNLOCK(sc);
   3072 			break;
   3073 		}
   3074 		WM_CORE_UNLOCK(sc);
   3075 		/*FALLTHROUGH*/
   3076 	default:
   3077 #ifdef WM_MPSAFE
   3078 		s = splnet();
   3079 #endif
    3080 		/* ether_ioctl() may call wm_start(), so call it unlocked */
   3081 		error = ether_ioctl(ifp, cmd, data);
   3082 #ifdef WM_MPSAFE
   3083 		splx(s);
   3084 #endif
   3085 		if (error != ENETRESET)
   3086 			break;
   3087 
   3088 		error = 0;
   3089 
   3090 		if (cmd == SIOCSIFCAP) {
   3091 			error = (*ifp->if_init)(ifp);
   3092 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3093 			;
   3094 		else if (ifp->if_flags & IFF_RUNNING) {
   3095 			/*
   3096 			 * Multicast list has changed; set the hardware filter
   3097 			 * accordingly.
   3098 			 */
   3099 			WM_CORE_LOCK(sc);
   3100 			wm_set_filter(sc);
   3101 			WM_CORE_UNLOCK(sc);
   3102 		}
   3103 		break;
   3104 	}
   3105 
   3106 #ifndef WM_MPSAFE
   3107 	splx(s);
   3108 #endif
   3109 	return error;
   3110 }
   3111 
   3112 /* MAC address related */
   3113 
   3114 /*
    3115  * Get the offset of the MAC address and return it.
    3116  * If an error occurs, return offset 0.
   3117  */
   3118 static uint16_t
   3119 wm_check_alt_mac_addr(struct wm_softc *sc)
   3120 {
   3121 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3122 	uint16_t offset = NVM_OFF_MACADDR;
   3123 
   3124 	/* Try to read alternative MAC address pointer */
   3125 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3126 		return 0;
   3127 
    3128 	/* Check whether the pointer is valid. */
   3129 	if ((offset == 0x0000) || (offset == 0xffff))
   3130 		return 0;
   3131 
   3132 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3133 	/*
    3134 	 * Check whether the alternative MAC address is valid.
    3135 	 * Some cards have a non-0xffff pointer but don't actually use
    3136 	 * an alternative MAC address.
    3137 	 *
    3138 	 * Check whether the multicast bit is set.
    3139 	 */
   3140 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3141 		if (((myea[0] & 0xff) & 0x01) == 0)
   3142 			return offset; /* Found */
   3143 
   3144 	/* Not found */
   3145 	return 0;
   3146 }
   3147 
   3148 static int
   3149 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3150 {
   3151 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3152 	uint16_t offset = NVM_OFF_MACADDR;
   3153 	int do_invert = 0;
   3154 
   3155 	switch (sc->sc_type) {
   3156 	case WM_T_82580:
   3157 	case WM_T_I350:
   3158 	case WM_T_I354:
   3159 		/* EEPROM Top Level Partitioning */
   3160 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3161 		break;
   3162 	case WM_T_82571:
   3163 	case WM_T_82575:
   3164 	case WM_T_82576:
   3165 	case WM_T_80003:
   3166 	case WM_T_I210:
   3167 	case WM_T_I211:
   3168 		offset = wm_check_alt_mac_addr(sc);
   3169 		if (offset == 0)
   3170 			if ((sc->sc_funcid & 0x01) == 1)
   3171 				do_invert = 1;
   3172 		break;
   3173 	default:
   3174 		if ((sc->sc_funcid & 0x01) == 1)
   3175 			do_invert = 1;
   3176 		break;
   3177 	}
   3178 
   3179 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3180 		goto bad;
   3181 
   3182 	enaddr[0] = myea[0] & 0xff;
   3183 	enaddr[1] = myea[0] >> 8;
   3184 	enaddr[2] = myea[1] & 0xff;
   3185 	enaddr[3] = myea[1] >> 8;
   3186 	enaddr[4] = myea[2] & 0xff;
   3187 	enaddr[5] = myea[2] >> 8;
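	/*
	 * Illustrative example: NVM words are little-endian, so
	 * myea[] = { 0x1100, 0x3322, 0x5544 } unpacks to the MAC address
	 * 00:11:22:33:44:55.
	 */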
   3188 
   3189 	/*
   3190 	 * Toggle the LSB of the MAC address on the second port
   3191 	 * of some dual port cards.
   3192 	 */
   3193 	if (do_invert != 0)
   3194 		enaddr[5] ^= 1;
   3195 
   3196 	return 0;
   3197 
   3198  bad:
   3199 	return -1;
   3200 }
   3201 
   3202 /*
   3203  * wm_set_ral:
   3204  *
    3205  *	Set an entry in the receive address list.
   3206  */
   3207 static void
   3208 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3209 {
   3210 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3211 	uint32_t wlock_mac;
   3212 	int rv;
   3213 
   3214 	if (enaddr != NULL) {
   3215 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3216 		    (enaddr[3] << 24);
   3217 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3218 		ral_hi |= RAL_AV;
   3219 	} else {
   3220 		ral_lo = 0;
   3221 		ral_hi = 0;
   3222 	}
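	/*
	 * Worked example: 00:11:22:33:44:55 packs to ral_lo = 0x33221100
	 * and ral_hi = 0x5544 | RAL_AV; RAL_AV marks the entry valid.
	 */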
   3223 
   3224 	switch (sc->sc_type) {
   3225 	case WM_T_82542_2_0:
   3226 	case WM_T_82542_2_1:
   3227 	case WM_T_82543:
   3228 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3229 		CSR_WRITE_FLUSH(sc);
   3230 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3231 		CSR_WRITE_FLUSH(sc);
   3232 		break;
   3233 	case WM_T_PCH2:
   3234 	case WM_T_PCH_LPT:
   3235 	case WM_T_PCH_SPT:
   3236 		if (idx == 0) {
   3237 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3238 			CSR_WRITE_FLUSH(sc);
   3239 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3240 			CSR_WRITE_FLUSH(sc);
   3241 			return;
   3242 		}
   3243 		if (sc->sc_type != WM_T_PCH2) {
   3244 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3245 			    FWSM_WLOCK_MAC);
   3246 			addrl = WMREG_SHRAL(idx - 1);
   3247 			addrh = WMREG_SHRAH(idx - 1);
   3248 		} else {
   3249 			wlock_mac = 0;
   3250 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3251 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3252 		}
   3253 
   3254 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3255 			rv = wm_get_swflag_ich8lan(sc);
   3256 			if (rv != 0)
   3257 				return;
   3258 			CSR_WRITE(sc, addrl, ral_lo);
   3259 			CSR_WRITE_FLUSH(sc);
   3260 			CSR_WRITE(sc, addrh, ral_hi);
   3261 			CSR_WRITE_FLUSH(sc);
   3262 			wm_put_swflag_ich8lan(sc);
   3263 		}
   3264 
   3265 		break;
   3266 	default:
   3267 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3268 		CSR_WRITE_FLUSH(sc);
   3269 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3270 		CSR_WRITE_FLUSH(sc);
   3271 		break;
   3272 	}
   3273 }
   3274 
   3275 /*
   3276  * wm_mchash:
   3277  *
   3278  *	Compute the hash of the multicast address for the 4096-bit
   3279  *	multicast filter.
   3280  */
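/*
 * Worked example (non-ICH, sc_mchash_type == 0): for an address ending in
 * ...:44:55, hash = (0x44 >> 4) | (0x55 << 4) = 0x554, so wm_set_filter()
 * sets bit (0x554 & 0x1f) = 20 of MTA register (0x554 >> 5) = 42.
 */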
   3281 static uint32_t
   3282 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3283 {
   3284 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3285 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3286 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3287 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3288 	uint32_t hash;
   3289 
   3290 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3291 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3292 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3293 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3294 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3295 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3296 		return (hash & 0x3ff);
   3297 	}
   3298 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3299 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3300 
   3301 	return (hash & 0xfff);
   3302 }
   3303 
   3304 /*
   3305  * wm_set_filter:
   3306  *
   3307  *	Set up the receive filter.
   3308  */
   3309 static void
   3310 wm_set_filter(struct wm_softc *sc)
   3311 {
   3312 	struct ethercom *ec = &sc->sc_ethercom;
   3313 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3314 	struct ether_multi *enm;
   3315 	struct ether_multistep step;
   3316 	bus_addr_t mta_reg;
   3317 	uint32_t hash, reg, bit;
   3318 	int i, size, ralmax;
   3319 
   3320 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3321 		device_xname(sc->sc_dev), __func__));
   3322 
   3323 	if (sc->sc_type >= WM_T_82544)
   3324 		mta_reg = WMREG_CORDOVA_MTA;
   3325 	else
   3326 		mta_reg = WMREG_MTA;
   3327 
   3328 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3329 
   3330 	if (ifp->if_flags & IFF_BROADCAST)
   3331 		sc->sc_rctl |= RCTL_BAM;
   3332 	if (ifp->if_flags & IFF_PROMISC) {
   3333 		sc->sc_rctl |= RCTL_UPE;
   3334 		goto allmulti;
   3335 	}
   3336 
   3337 	/*
   3338 	 * Set the station address in the first RAL slot, and
   3339 	 * clear the remaining slots.
   3340 	 */
   3341 	if (sc->sc_type == WM_T_ICH8)
    3342 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3343 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3344 	    || (sc->sc_type == WM_T_PCH))
   3345 		size = WM_RAL_TABSIZE_ICH8;
   3346 	else if (sc->sc_type == WM_T_PCH2)
   3347 		size = WM_RAL_TABSIZE_PCH2;
    3348 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3349 		size = WM_RAL_TABSIZE_PCH_LPT;
   3350 	else if (sc->sc_type == WM_T_82575)
   3351 		size = WM_RAL_TABSIZE_82575;
   3352 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3353 		size = WM_RAL_TABSIZE_82576;
   3354 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3355 		size = WM_RAL_TABSIZE_I350;
   3356 	else
   3357 		size = WM_RAL_TABSIZE;
   3358 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3359 
   3360 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3361 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3362 		switch (i) {
   3363 		case 0:
   3364 			/* We can use all entries */
   3365 			ralmax = size;
   3366 			break;
   3367 		case 1:
   3368 			/* Only RAR[0] */
   3369 			ralmax = 1;
   3370 			break;
   3371 		default:
   3372 			/* available SHRA + RAR[0] */
   3373 			ralmax = i + 1;
   3374 		}
   3375 	} else
   3376 		ralmax = size;
   3377 	for (i = 1; i < size; i++) {
   3378 		if (i < ralmax)
   3379 			wm_set_ral(sc, NULL, i);
   3380 	}
   3381 
   3382 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3383 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3384 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3385 	    || (sc->sc_type == WM_T_PCH_SPT))
   3386 		size = WM_ICH8_MC_TABSIZE;
   3387 	else
   3388 		size = WM_MC_TABSIZE;
   3389 	/* Clear out the multicast table. */
   3390 	for (i = 0; i < size; i++) {
   3391 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3392 		CSR_WRITE_FLUSH(sc);
   3393 	}
   3394 
   3395 	ETHER_LOCK(ec);
   3396 	ETHER_FIRST_MULTI(step, ec, enm);
   3397 	while (enm != NULL) {
   3398 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3399 			ETHER_UNLOCK(ec);
   3400 			/*
   3401 			 * We must listen to a range of multicast addresses.
   3402 			 * For now, just accept all multicasts, rather than
   3403 			 * trying to set only those filter bits needed to match
   3404 			 * the range.  (At this time, the only use of address
   3405 			 * ranges is for IP multicast routing, for which the
   3406 			 * range is big enough to require all bits set.)
   3407 			 */
   3408 			goto allmulti;
   3409 		}
   3410 
   3411 		hash = wm_mchash(sc, enm->enm_addrlo);
   3412 
   3413 		reg = (hash >> 5);
   3414 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3415 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3416 		    || (sc->sc_type == WM_T_PCH2)
   3417 		    || (sc->sc_type == WM_T_PCH_LPT)
   3418 		    || (sc->sc_type == WM_T_PCH_SPT))
   3419 			reg &= 0x1f;
   3420 		else
   3421 			reg &= 0x7f;
   3422 		bit = hash & 0x1f;
   3423 
   3424 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3425 		hash |= 1U << bit;
   3426 
   3427 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3428 			/*
    3429 			 * 82544 Errata 9: Certain registers cannot be written
   3430 			 * with particular alignments in PCI-X bus operation
   3431 			 * (FCAH, MTA and VFTA).
   3432 			 */
   3433 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3434 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3435 			CSR_WRITE_FLUSH(sc);
   3436 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3437 			CSR_WRITE_FLUSH(sc);
   3438 		} else {
   3439 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3440 			CSR_WRITE_FLUSH(sc);
   3441 		}
   3442 
   3443 		ETHER_NEXT_MULTI(step, enm);
   3444 	}
   3445 	ETHER_UNLOCK(ec);
   3446 
   3447 	ifp->if_flags &= ~IFF_ALLMULTI;
   3448 	goto setit;
   3449 
   3450  allmulti:
   3451 	ifp->if_flags |= IFF_ALLMULTI;
   3452 	sc->sc_rctl |= RCTL_MPE;
   3453 
   3454  setit:
   3455 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3456 }
   3457 
   3458 /* Reset and init related */
   3459 
   3460 static void
   3461 wm_set_vlan(struct wm_softc *sc)
   3462 {
   3463 
   3464 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3465 		device_xname(sc->sc_dev), __func__));
   3466 
   3467 	/* Deal with VLAN enables. */
   3468 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3469 		sc->sc_ctrl |= CTRL_VME;
   3470 	else
   3471 		sc->sc_ctrl &= ~CTRL_VME;
   3472 
   3473 	/* Write the control registers. */
   3474 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3475 }
   3476 
   3477 static void
   3478 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3479 {
   3480 	uint32_t gcr;
   3481 	pcireg_t ctrl2;
   3482 
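	/*
	 * Two mechanisms, depending on the PCIe capability version: set a
	 * 10ms timeout via GCR on pre-version-2 parts, or 16ms via the
	 * PCIe Device Control 2 register otherwise.
	 */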
   3483 	gcr = CSR_READ(sc, WMREG_GCR);
   3484 
    3485 	/* Only take action if the timeout value is at its default of 0 */
   3486 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3487 		goto out;
   3488 
   3489 	if ((gcr & GCR_CAP_VER2) == 0) {
   3490 		gcr |= GCR_CMPL_TMOUT_10MS;
   3491 		goto out;
   3492 	}
   3493 
   3494 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3495 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3496 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3497 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3498 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3499 
   3500 out:
   3501 	/* Disable completion timeout resend */
   3502 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3503 
   3504 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3505 }
   3506 
   3507 void
   3508 wm_get_auto_rd_done(struct wm_softc *sc)
   3509 {
   3510 	int i;
   3511 
   3512 	/* wait for eeprom to reload */
   3513 	switch (sc->sc_type) {
   3514 	case WM_T_82571:
   3515 	case WM_T_82572:
   3516 	case WM_T_82573:
   3517 	case WM_T_82574:
   3518 	case WM_T_82583:
   3519 	case WM_T_82575:
   3520 	case WM_T_82576:
   3521 	case WM_T_82580:
   3522 	case WM_T_I350:
   3523 	case WM_T_I354:
   3524 	case WM_T_I210:
   3525 	case WM_T_I211:
   3526 	case WM_T_80003:
   3527 	case WM_T_ICH8:
   3528 	case WM_T_ICH9:
   3529 		for (i = 0; i < 10; i++) {
   3530 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3531 				break;
   3532 			delay(1000);
   3533 		}
   3534 		if (i == 10) {
   3535 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3536 			    "complete\n", device_xname(sc->sc_dev));
   3537 		}
   3538 		break;
   3539 	default:
   3540 		break;
   3541 	}
   3542 }
   3543 
   3544 void
   3545 wm_lan_init_done(struct wm_softc *sc)
   3546 {
   3547 	uint32_t reg = 0;
   3548 	int i;
   3549 
   3550 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3551 		device_xname(sc->sc_dev), __func__));
   3552 
   3553 	/* Wait for eeprom to reload */
   3554 	switch (sc->sc_type) {
   3555 	case WM_T_ICH10:
   3556 	case WM_T_PCH:
   3557 	case WM_T_PCH2:
   3558 	case WM_T_PCH_LPT:
   3559 	case WM_T_PCH_SPT:
   3560 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3561 			reg = CSR_READ(sc, WMREG_STATUS);
   3562 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3563 				break;
   3564 			delay(100);
   3565 		}
   3566 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3567 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3568 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3569 		}
   3570 		break;
   3571 	default:
   3572 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3573 		    __func__);
   3574 		break;
   3575 	}
   3576 
   3577 	reg &= ~STATUS_LAN_INIT_DONE;
   3578 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3579 }
   3580 
   3581 void
   3582 wm_get_cfg_done(struct wm_softc *sc)
   3583 {
   3584 	int mask;
   3585 	uint32_t reg;
   3586 	int i;
   3587 
   3588 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3589 		device_xname(sc->sc_dev), __func__));
   3590 
   3591 	/* Wait for eeprom to reload */
   3592 	switch (sc->sc_type) {
   3593 	case WM_T_82542_2_0:
   3594 	case WM_T_82542_2_1:
   3595 		/* null */
   3596 		break;
   3597 	case WM_T_82543:
   3598 	case WM_T_82544:
   3599 	case WM_T_82540:
   3600 	case WM_T_82545:
   3601 	case WM_T_82545_3:
   3602 	case WM_T_82546:
   3603 	case WM_T_82546_3:
   3604 	case WM_T_82541:
   3605 	case WM_T_82541_2:
   3606 	case WM_T_82547:
   3607 	case WM_T_82547_2:
   3608 	case WM_T_82573:
   3609 	case WM_T_82574:
   3610 	case WM_T_82583:
   3611 		/* generic */
   3612 		delay(10*1000);
   3613 		break;
   3614 	case WM_T_80003:
   3615 	case WM_T_82571:
   3616 	case WM_T_82572:
   3617 	case WM_T_82575:
   3618 	case WM_T_82576:
   3619 	case WM_T_82580:
   3620 	case WM_T_I350:
   3621 	case WM_T_I354:
   3622 	case WM_T_I210:
   3623 	case WM_T_I211:
   3624 		if (sc->sc_type == WM_T_82571) {
    3625 			/* All ports of the 82571 share the port 0 CFGDONE bit */
   3626 			mask = EEMNGCTL_CFGDONE_0;
   3627 		} else
   3628 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3629 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3630 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3631 				break;
   3632 			delay(1000);
   3633 		}
   3634 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3635 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3636 				device_xname(sc->sc_dev), __func__));
   3637 		}
   3638 		break;
   3639 	case WM_T_ICH8:
   3640 	case WM_T_ICH9:
   3641 	case WM_T_ICH10:
   3642 	case WM_T_PCH:
   3643 	case WM_T_PCH2:
   3644 	case WM_T_PCH_LPT:
   3645 	case WM_T_PCH_SPT:
   3646 		delay(10*1000);
   3647 		if (sc->sc_type >= WM_T_ICH10)
   3648 			wm_lan_init_done(sc);
   3649 		else
   3650 			wm_get_auto_rd_done(sc);
   3651 
   3652 		reg = CSR_READ(sc, WMREG_STATUS);
   3653 		if ((reg & STATUS_PHYRA) != 0)
   3654 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3655 		break;
   3656 	default:
   3657 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3658 		    __func__);
   3659 		break;
   3660 	}
   3661 }
   3662 
   3663 void
   3664 wm_phy_post_reset(struct wm_softc *sc)
   3665 {
   3666 	uint32_t reg;
   3667 
   3668 	/* This function is only for ICH8 and newer. */
   3669 	if (sc->sc_type < WM_T_ICH8)
   3670 		return;
   3671 
   3672 	if (wm_phy_resetisblocked(sc)) {
   3673 		/* XXX */
   3674 		device_printf(sc->sc_dev, " PHY is blocked\n");
   3675 		return;
   3676 	}
   3677 
   3678 	/* Allow time for h/w to get to quiescent state after reset */
   3679 	delay(10*1000);
   3680 
   3681 	/* Perform any necessary post-reset workarounds */
   3682 	if (sc->sc_type == WM_T_PCH)
   3683 		wm_hv_phy_workaround_ich8lan(sc);
   3684 	if (sc->sc_type == WM_T_PCH2)
   3685 		wm_lv_phy_workaround_ich8lan(sc);
   3686 
   3687 	/* Clear the host wakeup bit after lcd reset */
   3688 	if (sc->sc_type >= WM_T_PCH) {
   3689 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3690 		    BM_PORT_GEN_CFG);
   3691 		reg &= ~BM_WUC_HOST_WU_BIT;
   3692 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3693 		    BM_PORT_GEN_CFG, reg);
   3694 	}
   3695 
   3696 	/* Configure the LCD with the extended configuration region in NVM */
   3697 	wm_init_lcd_from_nvm(sc);
   3698 
    3699 	/* XXX Configure the LCD with the OEM bits in NVM (not done yet) */
   3700 }
   3701 
   3702 void
   3703 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3704 {
   3705 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3706 	uint16_t phy_page = 0;
   3707 
   3708 	switch (sc->sc_type) {
   3709 	case WM_T_ICH8:
   3710 		if (sc->sc_phytype != WMPHY_IGP_3)
   3711 			return;
   3712 
   3713 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3714 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3715 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3716 			break;
   3717 		}
   3718 		/* FALLTHROUGH */
   3719 	case WM_T_PCH:
   3720 	case WM_T_PCH2:
   3721 	case WM_T_PCH_LPT:
   3722 	case WM_T_PCH_SPT:
   3723 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3724 		break;
   3725 	default:
   3726 		return;
   3727 	}
   3728 
   3729 	sc->phy.acquire(sc);
   3730 
   3731 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3732 	if ((reg & sw_cfg_mask) == 0)
   3733 		goto release;
   3734 
   3735 	/*
   3736 	 * Make sure HW does not configure LCD from PHY extended configuration
   3737 	 * before SW configuration
   3738 	 */
   3739 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3740 	if ((sc->sc_type < WM_T_PCH2)
   3741 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3742 		goto release;
   3743 
    3744 	/* word_addr is in DWORDs; << 1 converts it to 16-bit NVM words */
   3745 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3746 
   3747 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3748 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3749 
   3750 	if (((sc->sc_type == WM_T_PCH)
   3751 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3752 	    || (sc->sc_type > WM_T_PCH)) {
   3753 		/*
   3754 		 * HW configures the SMBus address and LEDs when the OEM and
   3755 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3756 		 * are cleared, SW will configure them instead.
   3757 		 */
   3758 		device_printf(sc->sc_dev, "%s: need write_smbus()\n",
   3759 		    __func__);
   3760 
   3761 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3762 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3763 	}
   3764 
   3765 	/* Configure LCD from extended configuration region. */
   3766 	for (i = 0; i < cnf_size; i++) {
   3767 		uint16_t reg_data, reg_addr;
   3768 
   3769 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3770 			goto release;
   3771 
    3772 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   3773 			goto release;
   3774 
   3775 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3776 			phy_page = reg_data;
   3777 
   3778 		reg_addr &= IGPHY_MAXREGADDR;
   3779 		reg_addr |= phy_page;
   3780 
   3781 		sc->phy.release(sc); /* XXX */
   3782 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3783 		sc->phy.acquire(sc); /* XXX */
   3784 	}
   3785 
   3786 release:
   3787 	sc->phy.release(sc);
   3788 	return;
   3789 }
   3790 
   3791 
   3792 /* Init hardware bits */
   3793 void
   3794 wm_initialize_hardware_bits(struct wm_softc *sc)
   3795 {
   3796 	uint32_t tarc0, tarc1, reg;
   3797 
   3798 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3799 		device_xname(sc->sc_dev), __func__));
   3800 
   3801 	/* For 82571 variant, 80003 and ICHs */
   3802 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3803 	    || (sc->sc_type >= WM_T_80003)) {
   3804 
   3805 		/* Transmit Descriptor Control 0 */
   3806 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3807 		reg |= TXDCTL_COUNT_DESC;
   3808 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3809 
   3810 		/* Transmit Descriptor Control 1 */
   3811 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3812 		reg |= TXDCTL_COUNT_DESC;
   3813 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3814 
   3815 		/* TARC0 */
   3816 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3817 		switch (sc->sc_type) {
   3818 		case WM_T_82571:
   3819 		case WM_T_82572:
   3820 		case WM_T_82573:
   3821 		case WM_T_82574:
   3822 		case WM_T_82583:
   3823 		case WM_T_80003:
   3824 			/* Clear bits 30..27 */
   3825 			tarc0 &= ~__BITS(30, 27);
   3826 			break;
   3827 		default:
   3828 			break;
   3829 		}
   3830 
   3831 		switch (sc->sc_type) {
   3832 		case WM_T_82571:
   3833 		case WM_T_82572:
   3834 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3835 
   3836 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3837 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3838 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3839 			/* 8257[12] Errata No.7 */
    3840 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3841 
   3842 			/* TARC1 bit 28 */
   3843 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3844 				tarc1 &= ~__BIT(28);
   3845 			else
   3846 				tarc1 |= __BIT(28);
   3847 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3848 
   3849 			/*
   3850 			 * 8257[12] Errata No.13
    3851 			 * Disable Dynamic Clock Gating.
   3852 			 */
   3853 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3854 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3855 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3856 			break;
   3857 		case WM_T_82573:
   3858 		case WM_T_82574:
   3859 		case WM_T_82583:
   3860 			if ((sc->sc_type == WM_T_82574)
   3861 			    || (sc->sc_type == WM_T_82583))
   3862 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3863 
   3864 			/* Extended Device Control */
   3865 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3866 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3867 			reg |= __BIT(22);	/* Set bit 22 */
   3868 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3869 
   3870 			/* Device Control */
   3871 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3872 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3873 
   3874 			/* PCIe Control Register */
   3875 			/*
   3876 			 * 82573 Errata (unknown).
   3877 			 *
   3878 			 * 82574 Errata 25 and 82583 Errata 12
   3879 			 * "Dropped Rx Packets":
    3880 			 *   NVM image version 2.1.4 and newer does not have this bug.
   3881 			 */
   3882 			reg = CSR_READ(sc, WMREG_GCR);
   3883 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3884 			CSR_WRITE(sc, WMREG_GCR, reg);
   3885 
   3886 			if ((sc->sc_type == WM_T_82574)
   3887 			    || (sc->sc_type == WM_T_82583)) {
   3888 				/*
   3889 				 * Document says this bit must be set for
   3890 				 * proper operation.
   3891 				 */
   3892 				reg = CSR_READ(sc, WMREG_GCR);
   3893 				reg |= __BIT(22);
   3894 				CSR_WRITE(sc, WMREG_GCR, reg);
   3895 
   3896 				/*
   3897 				 * Apply workaround for hardware errata
   3898 				 * documented in errata docs Fixes issue where
   3899 				 * some error prone or unreliable PCIe
   3900 				 * completions are occurring, particularly
   3901 				 * with ASPM enabled. Without fix, issue can
   3902 				 * cause Tx timeouts.
   3903 				 */
   3904 				reg = CSR_READ(sc, WMREG_GCR2);
   3905 				reg |= __BIT(0);
   3906 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3907 			}
   3908 			break;
   3909 		case WM_T_80003:
   3910 			/* TARC0 */
   3911 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3912 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3913 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3914 
   3915 			/* TARC1 bit 28 */
   3916 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3917 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3918 				tarc1 &= ~__BIT(28);
   3919 			else
   3920 				tarc1 |= __BIT(28);
   3921 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3922 			break;
   3923 		case WM_T_ICH8:
   3924 		case WM_T_ICH9:
   3925 		case WM_T_ICH10:
   3926 		case WM_T_PCH:
   3927 		case WM_T_PCH2:
   3928 		case WM_T_PCH_LPT:
   3929 		case WM_T_PCH_SPT:
   3930 			/* TARC0 */
   3931 			if ((sc->sc_type == WM_T_ICH8)
   3932 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3933 				/* Set TARC0 bits 29 and 28 */
   3934 				tarc0 |= __BITS(29, 28);
   3935 			}
   3936 			/* Set TARC0 bits 23,24,26,27 */
   3937 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3938 
   3939 			/* CTRL_EXT */
   3940 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3941 			reg |= __BIT(22);	/* Set bit 22 */
   3942 			/*
   3943 			 * Enable PHY low-power state when MAC is at D3
   3944 			 * w/o WoL
   3945 			 */
   3946 			if (sc->sc_type >= WM_T_PCH)
   3947 				reg |= CTRL_EXT_PHYPDEN;
   3948 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3949 
   3950 			/* TARC1 */
   3951 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3952 			/* bit 28 */
   3953 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3954 				tarc1 &= ~__BIT(28);
   3955 			else
   3956 				tarc1 |= __BIT(28);
   3957 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3958 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3959 
   3960 			/* Device Status */
   3961 			if (sc->sc_type == WM_T_ICH8) {
   3962 				reg = CSR_READ(sc, WMREG_STATUS);
   3963 				reg &= ~__BIT(31);
   3964 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3965 
   3966 			}
   3967 
   3968 			/* IOSFPC */
   3969 			if (sc->sc_type == WM_T_PCH_SPT) {
   3970 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3971 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   3972 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3973 			}
   3974 			/*
   3975 			 * Work-around descriptor data corruption issue during
   3976 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3977 			 * capability.
   3978 			 */
   3979 			reg = CSR_READ(sc, WMREG_RFCTL);
   3980 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3981 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3982 			break;
   3983 		default:
   3984 			break;
   3985 		}
   3986 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3987 
   3988 		switch (sc->sc_type) {
   3989 		/*
   3990 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3991 		 * Avoid RSS Hash Value bug.
   3992 		 */
   3993 		case WM_T_82571:
   3994 		case WM_T_82572:
   3995 		case WM_T_82573:
   3996 		case WM_T_80003:
   3997 		case WM_T_ICH8:
   3998 			reg = CSR_READ(sc, WMREG_RFCTL);
    3999 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4000 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4001 			break;
   4002 		case WM_T_82574:
    4003 			/* Use extended Rx descriptors. */
   4004 			reg = CSR_READ(sc, WMREG_RFCTL);
   4005 			reg |= WMREG_RFCTL_EXSTEN;
   4006 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4007 			break;
   4008 		default:
   4009 			break;
   4010 		}
   4011 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4012 		/*
   4013 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4014 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4015 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4016 		 * Correctly by the Device"
   4017 		 *
   4018 		 * I354(C2000) Errata AVR53:
   4019 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4020 		 * Hang"
   4021 		 */
   4022 		reg = CSR_READ(sc, WMREG_RFCTL);
   4023 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4024 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4025 	}
   4026 }
   4027 
   4028 static uint32_t
   4029 wm_rxpbs_adjust_82580(uint32_t val)
   4030 {
   4031 	uint32_t rv = 0;
   4032 
   4033 	if (val < __arraycount(wm_82580_rxpbs_table))
   4034 		rv = wm_82580_rxpbs_table[val];
   4035 
   4036 	return rv;
   4037 }
   4038 
   4039 /*
   4040  * wm_reset_phy:
   4041  *
   4042  *	generic PHY reset function.
   4043  *	Same as e1000_phy_hw_reset_generic()
   4044  */
   4045 static void
   4046 wm_reset_phy(struct wm_softc *sc)
   4047 {
   4048 	uint32_t reg;
   4049 
   4050 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4051 		device_xname(sc->sc_dev), __func__));
   4052 	if (wm_phy_resetisblocked(sc))
   4053 		return;
   4054 
   4055 	sc->phy.acquire(sc);
   4056 
   4057 	reg = CSR_READ(sc, WMREG_CTRL);
   4058 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4059 	CSR_WRITE_FLUSH(sc);
   4060 
   4061 	delay(sc->phy.reset_delay_us);
   4062 
   4063 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4064 	CSR_WRITE_FLUSH(sc);
   4065 
   4066 	delay(150);
   4067 
   4068 	sc->phy.release(sc);
   4069 
   4070 	wm_get_cfg_done(sc);
   4071 	wm_phy_post_reset(sc);
   4072 }
   4073 
   4074 static void
   4075 wm_flush_desc_rings(struct wm_softc *sc)
   4076 {
   4077 	pcireg_t preg;
   4078 	uint32_t reg;
   4079 	struct wm_txqueue *txq;
   4080 	wiseman_txdesc_t *txd;
   4081 	int nexttx;
   4082 	uint32_t rctl;
   4083 
   4084 	/* First, disable MULR fix in FEXTNVM11 */
   4085 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4086 	reg |= FEXTNVM11_DIS_MULRFIX;
   4087 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4088 
   4089 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4090 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4091 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4092 		return;
   4093 
   4094 	/* TX */
   4095 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4096 	    device_xname(sc->sc_dev), preg, reg);
   4097 	reg = CSR_READ(sc, WMREG_TCTL);
   4098 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4099 
   4100 	txq = &sc->sc_queue[0].wmq_txq;
   4101 	nexttx = txq->txq_next;
   4102 	txd = &txq->txq_descs[nexttx];
   4103 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    4104 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4105 	txd->wtx_fields.wtxu_status = 0;
   4106 	txd->wtx_fields.wtxu_options = 0;
   4107 	txd->wtx_fields.wtxu_vlan = 0;
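	/*
	 * Queue one dummy 512-byte descriptor with IFCS set so the
	 * hardware has a descriptor to fetch and can drain the TX ring.
	 */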
   4108 
   4109 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4110 	    BUS_SPACE_BARRIER_WRITE);
   4111 
   4112 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4113 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4114 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4115 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4116 	delay(250);
   4117 
   4118 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4119 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4120 		return;
   4121 
   4122 	/* RX */
   4123 	printf("%s: Need RX flush (reg = %08x)\n",
   4124 	    device_xname(sc->sc_dev), preg);
   4125 	rctl = CSR_READ(sc, WMREG_RCTL);
   4126 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4127 	CSR_WRITE_FLUSH(sc);
   4128 	delay(150);
   4129 
   4130 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4131 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4132 	reg &= 0xffffc000;
   4133 	/*
   4134 	 * update thresholds: prefetch threshold to 31, host threshold
   4135 	 * to 1 and make sure the granularity is "descriptors" and not
   4136 	 * "cache lines"
   4137 	 */
   4138 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4139 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4140 
   4141 	/*
   4142 	 * momentarily enable the RX ring for the changes to take
   4143 	 * effect
   4144 	 */
   4145 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4146 	CSR_WRITE_FLUSH(sc);
   4147 	delay(150);
   4148 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4149 }
   4150 
   4151 /*
   4152  * wm_reset:
   4153  *
   4154  *	Reset the i82542 chip.
   4155  */
   4156 static void
   4157 wm_reset(struct wm_softc *sc)
   4158 {
   4159 	int phy_reset = 0;
   4160 	int i, error = 0;
   4161 	uint32_t reg;
   4162 
   4163 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4164 		device_xname(sc->sc_dev), __func__));
   4165 	KASSERT(sc->sc_type != 0);
   4166 
   4167 	/*
   4168 	 * Allocate on-chip memory according to the MTU size.
   4169 	 * The Packet Buffer Allocation register must be written
   4170 	 * before the chip is reset.
   4171 	 */
   4172 	switch (sc->sc_type) {
   4173 	case WM_T_82547:
   4174 	case WM_T_82547_2:
   4175 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4176 		    PBA_22K : PBA_30K;
   4177 		for (i = 0; i < sc->sc_nqueues; i++) {
   4178 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4179 			txq->txq_fifo_head = 0;
   4180 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4181 			txq->txq_fifo_size =
   4182 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4183 			txq->txq_fifo_stall = 0;
   4184 		}
   4185 		break;
   4186 	case WM_T_82571:
   4187 	case WM_T_82572:
    4188 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4189 	case WM_T_80003:
   4190 		sc->sc_pba = PBA_32K;
   4191 		break;
   4192 	case WM_T_82573:
   4193 		sc->sc_pba = PBA_12K;
   4194 		break;
   4195 	case WM_T_82574:
   4196 	case WM_T_82583:
   4197 		sc->sc_pba = PBA_20K;
   4198 		break;
   4199 	case WM_T_82576:
   4200 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4201 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4202 		break;
   4203 	case WM_T_82580:
   4204 	case WM_T_I350:
   4205 	case WM_T_I354:
   4206 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4207 		break;
   4208 	case WM_T_I210:
   4209 	case WM_T_I211:
   4210 		sc->sc_pba = PBA_34K;
   4211 		break;
   4212 	case WM_T_ICH8:
   4213 		/* Workaround for a bit corruption issue in FIFO memory */
   4214 		sc->sc_pba = PBA_8K;
   4215 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4216 		break;
   4217 	case WM_T_ICH9:
   4218 	case WM_T_ICH10:
   4219 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4220 		    PBA_14K : PBA_10K;
   4221 		break;
   4222 	case WM_T_PCH:
   4223 	case WM_T_PCH2:
   4224 	case WM_T_PCH_LPT:
   4225 	case WM_T_PCH_SPT:
   4226 		sc->sc_pba = PBA_26K;
   4227 		break;
   4228 	default:
   4229 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4230 		    PBA_40K : PBA_48K;
   4231 		break;
   4232 	}
   4233 	/*
   4234 	 * Only old or non-multiqueue devices have the PBA register
   4235 	 * XXX Need special handling for 82575.
   4236 	 */
   4237 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4238 	    || (sc->sc_type == WM_T_82575))
   4239 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4240 
   4241 	/* Prevent the PCI-E bus from sticking */
   4242 	if (sc->sc_flags & WM_F_PCIE) {
   4243 		int timeout = 800;
   4244 
   4245 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4246 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4247 
   4248 		while (timeout--) {
   4249 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4250 			    == 0)
   4251 				break;
   4252 			delay(100);
   4253 		}
   4254 		if (timeout == 0)
   4255 			device_printf(sc->sc_dev,
   4256 			    "failed to disable busmastering\n");
   4257 	}
   4258 
   4259 	/* Set the completion timeout for interface */
   4260 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4261 	    || (sc->sc_type == WM_T_82580)
   4262 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4263 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4264 		wm_set_pcie_completion_timeout(sc);
   4265 
   4266 	/* Clear interrupt */
   4267 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4268 	if (wm_is_using_msix(sc)) {
   4269 		if (sc->sc_type != WM_T_82574) {
   4270 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4271 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4272 		} else {
   4273 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4274 		}
   4275 	}
   4276 
   4277 	/* Stop the transmit and receive processes. */
   4278 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4279 	sc->sc_rctl &= ~RCTL_EN;
   4280 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4281 	CSR_WRITE_FLUSH(sc);
   4282 
   4283 	/* XXX set_tbi_sbp_82543() */
   4284 
   4285 	delay(10*1000);
   4286 
   4287 	/* Must acquire the MDIO ownership before MAC reset */
   4288 	switch (sc->sc_type) {
   4289 	case WM_T_82573:
   4290 	case WM_T_82574:
   4291 	case WM_T_82583:
   4292 		error = wm_get_hw_semaphore_82573(sc);
   4293 		break;
   4294 	default:
   4295 		break;
   4296 	}
   4297 
   4298 	/*
   4299 	 * 82541 Errata 29? & 82547 Errata 28?
    4300 	 * See also the description of the PHY_RST bit in the CTRL register
   4301 	 * in 8254x_GBe_SDM.pdf.
   4302 	 */
   4303 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4304 		CSR_WRITE(sc, WMREG_CTRL,
   4305 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4306 		CSR_WRITE_FLUSH(sc);
   4307 		delay(5000);
   4308 	}
   4309 
   4310 	switch (sc->sc_type) {
   4311 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4312 	case WM_T_82541:
   4313 	case WM_T_82541_2:
   4314 	case WM_T_82547:
   4315 	case WM_T_82547_2:
   4316 		/*
   4317 		 * On some chipsets, a reset through a memory-mapped write
   4318 		 * cycle can cause the chip to reset before completing the
    4319 		 * write cycle.  This causes major headaches that can be
   4320 		 * avoided by issuing the reset via indirect register writes
   4321 		 * through I/O space.
   4322 		 *
   4323 		 * So, if we successfully mapped the I/O BAR at attach time,
   4324 		 * use that.  Otherwise, try our luck with a memory-mapped
   4325 		 * reset.
   4326 		 */
   4327 		if (sc->sc_flags & WM_F_IOH_VALID)
   4328 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4329 		else
   4330 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4331 		break;
   4332 	case WM_T_82545_3:
   4333 	case WM_T_82546_3:
   4334 		/* Use the shadow control register on these chips. */
   4335 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4336 		break;
   4337 	case WM_T_80003:
   4338 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4339 		sc->phy.acquire(sc);
   4340 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4341 		sc->phy.release(sc);
   4342 		break;
   4343 	case WM_T_ICH8:
   4344 	case WM_T_ICH9:
   4345 	case WM_T_ICH10:
   4346 	case WM_T_PCH:
   4347 	case WM_T_PCH2:
   4348 	case WM_T_PCH_LPT:
   4349 	case WM_T_PCH_SPT:
   4350 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4351 		if (wm_phy_resetisblocked(sc) == false) {
   4352 			/*
   4353 			 * Gate automatic PHY configuration by hardware on
   4354 			 * non-managed 82579
   4355 			 */
   4356 			if ((sc->sc_type == WM_T_PCH2)
   4357 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4358 				== 0))
   4359 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4360 
   4361 			reg |= CTRL_PHY_RESET;
   4362 			phy_reset = 1;
   4363 		} else
   4364 			printf("XXX reset is blocked!!!\n");
   4365 		sc->phy.acquire(sc);
   4366 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4367 		/* Don't insert a completion barrier during reset */
   4368 		delay(20*1000);
   4369 		mutex_exit(sc->sc_ich_phymtx);
   4370 		break;
   4371 	case WM_T_82580:
   4372 	case WM_T_I350:
   4373 	case WM_T_I354:
   4374 	case WM_T_I210:
   4375 	case WM_T_I211:
   4376 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4377 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4378 			CSR_WRITE_FLUSH(sc);
   4379 		delay(5000);
   4380 		break;
   4381 	case WM_T_82542_2_0:
   4382 	case WM_T_82542_2_1:
   4383 	case WM_T_82543:
   4384 	case WM_T_82540:
   4385 	case WM_T_82545:
   4386 	case WM_T_82546:
   4387 	case WM_T_82571:
   4388 	case WM_T_82572:
   4389 	case WM_T_82573:
   4390 	case WM_T_82574:
   4391 	case WM_T_82575:
   4392 	case WM_T_82576:
   4393 	case WM_T_82583:
   4394 	default:
   4395 		/* Everything else can safely use the documented method. */
   4396 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4397 		break;
   4398 	}
   4399 
   4400 	/* Must release the MDIO ownership after MAC reset */
   4401 	switch (sc->sc_type) {
   4402 	case WM_T_82573:
   4403 	case WM_T_82574:
   4404 	case WM_T_82583:
   4405 		if (error == 0)
   4406 			wm_put_hw_semaphore_82573(sc);
   4407 		break;
   4408 	default:
   4409 		break;
   4410 	}
   4411 
   4412 	if (phy_reset != 0)
   4413 		wm_get_cfg_done(sc);
   4414 
   4415 	/* reload EEPROM */
   4416 	switch (sc->sc_type) {
   4417 	case WM_T_82542_2_0:
   4418 	case WM_T_82542_2_1:
   4419 	case WM_T_82543:
   4420 	case WM_T_82544:
   4421 		delay(10);
   4422 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4423 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4424 		CSR_WRITE_FLUSH(sc);
   4425 		delay(2000);
   4426 		break;
   4427 	case WM_T_82540:
   4428 	case WM_T_82545:
   4429 	case WM_T_82545_3:
   4430 	case WM_T_82546:
   4431 	case WM_T_82546_3:
   4432 		delay(5*1000);
   4433 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4434 		break;
   4435 	case WM_T_82541:
   4436 	case WM_T_82541_2:
   4437 	case WM_T_82547:
   4438 	case WM_T_82547_2:
   4439 		delay(20000);
   4440 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4441 		break;
   4442 	case WM_T_82571:
   4443 	case WM_T_82572:
   4444 	case WM_T_82573:
   4445 	case WM_T_82574:
   4446 	case WM_T_82583:
   4447 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4448 			delay(10);
   4449 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4450 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4451 			CSR_WRITE_FLUSH(sc);
   4452 		}
   4453 		/* check EECD_EE_AUTORD */
   4454 		wm_get_auto_rd_done(sc);
   4455 		/*
    4456 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   4457 		 * is set.
   4458 		 */
   4459 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4460 		    || (sc->sc_type == WM_T_82583))
   4461 			delay(25*1000);
   4462 		break;
   4463 	case WM_T_82575:
   4464 	case WM_T_82576:
   4465 	case WM_T_82580:
   4466 	case WM_T_I350:
   4467 	case WM_T_I354:
   4468 	case WM_T_I210:
   4469 	case WM_T_I211:
   4470 	case WM_T_80003:
   4471 		/* check EECD_EE_AUTORD */
   4472 		wm_get_auto_rd_done(sc);
   4473 		break;
   4474 	case WM_T_ICH8:
   4475 	case WM_T_ICH9:
   4476 	case WM_T_ICH10:
   4477 	case WM_T_PCH:
   4478 	case WM_T_PCH2:
   4479 	case WM_T_PCH_LPT:
   4480 	case WM_T_PCH_SPT:
   4481 		break;
   4482 	default:
   4483 		panic("%s: unknown type\n", __func__);
   4484 	}
   4485 
   4486 	/* Check whether EEPROM is present or not */
   4487 	switch (sc->sc_type) {
   4488 	case WM_T_82575:
   4489 	case WM_T_82576:
   4490 	case WM_T_82580:
   4491 	case WM_T_I350:
   4492 	case WM_T_I354:
   4493 	case WM_T_ICH8:
   4494 	case WM_T_ICH9:
   4495 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4496 			/* Not found */
   4497 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4498 			if (sc->sc_type == WM_T_82575)
   4499 				wm_reset_init_script_82575(sc);
   4500 		}
   4501 		break;
   4502 	default:
   4503 		break;
   4504 	}
   4505 
   4506 	if (phy_reset != 0)
   4507 		wm_phy_post_reset(sc);
   4508 
   4509 	if ((sc->sc_type == WM_T_82580)
   4510 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4511 		/* clear global device reset status bit */
   4512 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4513 	}
   4514 
   4515 	/* Clear any pending interrupt events. */
   4516 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4517 	reg = CSR_READ(sc, WMREG_ICR);
   4518 	if (wm_is_using_msix(sc)) {
   4519 		if (sc->sc_type != WM_T_82574) {
   4520 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4521 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4522 		} else
   4523 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4524 	}
   4525 
   4526 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4527 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4528 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4529 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4530 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4531 		reg |= KABGTXD_BGSQLBIAS;
   4532 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4533 	}
   4534 
   4535 	/* reload sc_ctrl */
   4536 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4537 
   4538 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4539 		wm_set_eee_i350(sc);
   4540 
   4541 	/*
   4542 	 * For PCH, this write will make sure that any noise will be detected
   4543 	 * as a CRC error and be dropped rather than show up as a bad packet
   4544 	 * to the DMA engine
   4545 	 */
   4546 	if (sc->sc_type == WM_T_PCH)
   4547 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4548 
   4549 	if (sc->sc_type >= WM_T_82544)
   4550 		CSR_WRITE(sc, WMREG_WUC, 0);
   4551 
   4552 	wm_reset_mdicnfg_82580(sc);
   4553 
   4554 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4555 		wm_pll_workaround_i210(sc);
   4556 }
   4557 
   4558 /*
   4559  * wm_add_rxbuf:
   4560  *
    4561  *	Add a receive buffer to the indicated descriptor.
   4562  */
   4563 static int
   4564 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4565 {
   4566 	struct wm_softc *sc = rxq->rxq_sc;
   4567 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4568 	struct mbuf *m;
   4569 	int error;
   4570 
   4571 	KASSERT(mutex_owned(rxq->rxq_lock));
   4572 
   4573 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4574 	if (m == NULL)
   4575 		return ENOBUFS;
   4576 
   4577 	MCLGET(m, M_DONTWAIT);
   4578 	if ((m->m_flags & M_EXT) == 0) {
   4579 		m_freem(m);
   4580 		return ENOBUFS;
   4581 	}
   4582 
   4583 	if (rxs->rxs_mbuf != NULL)
   4584 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4585 
   4586 	rxs->rxs_mbuf = m;
   4587 
   4588 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4589 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4590 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4591 	if (error) {
   4592 		/* XXX XXX XXX */
   4593 		aprint_error_dev(sc->sc_dev,
   4594 		    "unable to load rx DMA map %d, error = %d\n",
   4595 		    idx, error);
   4596 		panic("wm_add_rxbuf");
   4597 	}
   4598 
   4599 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4600 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4601 
   4602 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4603 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4604 			wm_init_rxdesc(rxq, idx);
   4605 	} else
   4606 		wm_init_rxdesc(rxq, idx);
   4607 
   4608 	return 0;
   4609 }
   4610 
   4611 /*
   4612  * wm_rxdrain:
   4613  *
   4614  *	Drain the receive queue.
   4615  */
   4616 static void
   4617 wm_rxdrain(struct wm_rxqueue *rxq)
   4618 {
   4619 	struct wm_softc *sc = rxq->rxq_sc;
   4620 	struct wm_rxsoft *rxs;
   4621 	int i;
   4622 
   4623 	KASSERT(mutex_owned(rxq->rxq_lock));
   4624 
   4625 	for (i = 0; i < WM_NRXDESC; i++) {
   4626 		rxs = &rxq->rxq_soft[i];
   4627 		if (rxs->rxs_mbuf != NULL) {
   4628 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4629 			m_freem(rxs->rxs_mbuf);
   4630 			rxs->rxs_mbuf = NULL;
   4631 		}
   4632 	}
   4633 }
   4634 
   4635 
   4636 /*
   4637  * XXX copied from FreeBSD's sys/net/rss_config.c
   4638  */
   4639 /*
   4640  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4641  * effectiveness may be limited by algorithm choice and available entropy
   4642  * during the boot.
   4643  *
   4644  * XXXRW: And that we don't randomize it yet!
   4645  *
   4646  * This is the default Microsoft RSS specification key which is also
   4647  * the Chelsio T5 firmware default key.
   4648  */
   4649 #define RSS_KEYSIZE 40
   4650 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4651 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4652 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4653 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4654 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4655 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4656 };
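        /*
         * The 40-byte key is handed out by wm_rss_getkey() and written to
         * the RSSRK registers four bytes at a time in wm_init_rss().
         */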
   4657 
   4658 /*
   4659  * The caller must pass an array of size sizeof(wm_rss_key).
   4660  *
   4661  * XXX
   4662  * As if_ixgbe may also use this function, it should not be an
   4663  * if_wm-specific function.
   4664  */
   4665 static void
   4666 wm_rss_getkey(uint8_t *key)
   4667 {
   4668 
   4669 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4670 }
   4671 
   4672 /*
   4673  * Set up registers for RSS.
   4674  *
   4675  * XXX VMDq is not yet supported.
   4676  */
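        /*
         * For example, with sc_nqueues == 4 the loop below programs the
         * redirection table entries with queue indices 0,1,2,3,0,1,2,3,...
         * (qid = i % sc->sc_nqueues), so the low bits of the computed RSS
         * hash then index this table to select one of the four queues.
         */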
   4677 static void
   4678 wm_init_rss(struct wm_softc *sc)
   4679 {
   4680 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4681 	int i;
   4682 
   4683 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4684 
   4685 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4686 		int qid, reta_ent;
   4687 
   4688 		qid  = i % sc->sc_nqueues;
   4689 		switch (sc->sc_type) {
   4690 		case WM_T_82574:
   4691 			reta_ent = __SHIFTIN(qid,
   4692 			    RETA_ENT_QINDEX_MASK_82574);
   4693 			break;
   4694 		case WM_T_82575:
   4695 			reta_ent = __SHIFTIN(qid,
   4696 			    RETA_ENT_QINDEX1_MASK_82575);
   4697 			break;
   4698 		default:
   4699 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4700 			break;
   4701 		}
   4702 
   4703 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4704 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4705 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4706 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4707 	}
   4708 
   4709 	wm_rss_getkey((uint8_t *)rss_key);
   4710 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4711 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4712 
   4713 	if (sc->sc_type == WM_T_82574)
   4714 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4715 	else
   4716 		mrqc = MRQC_ENABLE_RSS_MQ;
   4717 
   4718 	/*
   4719 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4720 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4721 	 */
   4722 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4723 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4724 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4725 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4726 
   4727 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4728 }
   4729 
   4730 /*
   4731  * Adjust the numbers of TX and RX queues which the system actually uses.
   4732  *
   4733  * The numbers are affected by the parameters below:
   4734  *     - The number of hardware queues
   4735  *     - The number of MSI-X vectors (= "nvectors" argument)
   4736  *     - ncpu
   4737  */
   4738 static void
   4739 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4740 {
   4741 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4742 
   4743 	if (nvectors < 2) {
   4744 		sc->sc_nqueues = 1;
   4745 		return;
   4746 	}
   4747 
   4748 	switch (sc->sc_type) {
   4749 	case WM_T_82572:
   4750 		hw_ntxqueues = 2;
   4751 		hw_nrxqueues = 2;
   4752 		break;
   4753 	case WM_T_82574:
   4754 		hw_ntxqueues = 2;
   4755 		hw_nrxqueues = 2;
   4756 		break;
   4757 	case WM_T_82575:
   4758 		hw_ntxqueues = 4;
   4759 		hw_nrxqueues = 4;
   4760 		break;
   4761 	case WM_T_82576:
   4762 		hw_ntxqueues = 16;
   4763 		hw_nrxqueues = 16;
   4764 		break;
   4765 	case WM_T_82580:
   4766 	case WM_T_I350:
   4767 	case WM_T_I354:
   4768 		hw_ntxqueues = 8;
   4769 		hw_nrxqueues = 8;
   4770 		break;
   4771 	case WM_T_I210:
   4772 		hw_ntxqueues = 4;
   4773 		hw_nrxqueues = 4;
   4774 		break;
   4775 	case WM_T_I211:
   4776 		hw_ntxqueues = 2;
   4777 		hw_nrxqueues = 2;
   4778 		break;
   4779 		/*
   4780 		 * As the Ethernet controllers below do not support MSI-X,
   4781 		 * this driver does not use multiqueue on them.
   4782 		 *     - WM_T_80003
   4783 		 *     - WM_T_ICH8
   4784 		 *     - WM_T_ICH9
   4785 		 *     - WM_T_ICH10
   4786 		 *     - WM_T_PCH
   4787 		 *     - WM_T_PCH2
   4788 		 *     - WM_T_PCH_LPT
   4789 		 */
   4790 	default:
   4791 		hw_ntxqueues = 1;
   4792 		hw_nrxqueues = 1;
   4793 		break;
   4794 	}
   4795 
   4796 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4797 
   4798 	/*
   4799 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
   4800 	 * the number of queues actually used.
   4801 	 */
   4802 	if (nvectors < hw_nqueues + 1) {
   4803 		sc->sc_nqueues = nvectors - 1;
   4804 	} else {
   4805 		sc->sc_nqueues = hw_nqueues;
   4806 	}
   4807 
   4808 	/*
   4809 	 * As more queues than CPUs cannot improve scaling, we limit
   4810 	 * the number of queues actually used.
   4811 	 */
   4812 	if (ncpu < sc->sc_nqueues)
   4813 		sc->sc_nqueues = ncpu;
   4814 }
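        /*
         * Worked example of the clamping above: an 82576 (16 hardware
         * queues) probed with nvectors == 5 on an 8-CPU machine ends up
         * with sc_nqueues = nvectors - 1 = 4, since one MSI-X vector is
         * reserved for the link interrupt and the rest serve TX/RX queues.
         */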
   4815 
   4816 static inline bool
   4817 wm_is_using_msix(struct wm_softc *sc)
   4818 {
   4819 
   4820 	return (sc->sc_nintrs > 1);
   4821 }
   4822 
   4823 static inline bool
   4824 wm_is_using_multiqueue(struct wm_softc *sc)
   4825 {
   4826 
   4827 	return (sc->sc_nqueues > 1);
   4828 }
   4829 
   4830 static int
   4831 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4832 {
   4833 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4834 	wmq->wmq_id = qidx;
   4835 	wmq->wmq_intr_idx = intr_idx;
   4836 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4837 #ifdef WM_MPSAFE
   4838 	    | SOFTINT_MPSAFE
   4839 #endif
   4840 	    , wm_handle_queue, wmq);
   4841 	if (wmq->wmq_si != NULL)
   4842 		return 0;
   4843 
   4844 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4845 	    wmq->wmq_id);
   4846 
   4847 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4848 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4849 	return ENOMEM;
   4850 }
   4851 
   4852 /*
   4853  * Both single-interrupt MSI and INTx can use this function.
   4854  */
   4855 static int
   4856 wm_setup_legacy(struct wm_softc *sc)
   4857 {
   4858 	pci_chipset_tag_t pc = sc->sc_pc;
   4859 	const char *intrstr = NULL;
   4860 	char intrbuf[PCI_INTRSTR_LEN];
   4861 	int error;
   4862 
   4863 	error = wm_alloc_txrx_queues(sc);
   4864 	if (error) {
   4865 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4866 		    error);
   4867 		return ENOMEM;
   4868 	}
   4869 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4870 	    sizeof(intrbuf));
   4871 #ifdef WM_MPSAFE
   4872 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4873 #endif
   4874 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4875 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4876 	if (sc->sc_ihs[0] == NULL) {
   4877 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4878 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4879 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4880 		return ENOMEM;
   4881 	}
   4882 
   4883 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4884 	sc->sc_nintrs = 1;
   4885 
   4886 	return wm_softint_establish(sc, 0, 0);
   4887 }
   4888 
   4889 static int
   4890 wm_setup_msix(struct wm_softc *sc)
   4891 {
   4892 	void *vih;
   4893 	kcpuset_t *affinity;
   4894 	int qidx, error, intr_idx, txrx_established;
   4895 	pci_chipset_tag_t pc = sc->sc_pc;
   4896 	const char *intrstr = NULL;
   4897 	char intrbuf[PCI_INTRSTR_LEN];
   4898 	char intr_xname[INTRDEVNAMEBUF];
   4899 
   4900 	if (sc->sc_nqueues < ncpu) {
   4901 		/*
   4902 		 * To avoid conflicts with other devices' interrupts, the
   4903 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
   4904 		 */
   4905 		sc->sc_affinity_offset = 1;
   4906 	} else {
   4907 		/*
   4908 		 * In this case, this device uses all CPUs, so we align the
   4909 		 * affinitized cpu_index with the MSI-X vector number for readability.
   4910 		 */
   4911 		sc->sc_affinity_offset = 0;
   4912 	}
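        	/*
        	 * For example, with 4 queues on an 8-CPU system the offset is 1,
        	 * so the loop below binds the TXRX vectors round-robin to CPUs
        	 * 1..4 (affinity_to = (sc_affinity_offset + intr_idx) % ncpu),
        	 * while the LINK vector keeps its default affinity.
        	 */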
   4913 
   4914 	error = wm_alloc_txrx_queues(sc);
   4915 	if (error) {
   4916 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4917 		    error);
   4918 		return ENOMEM;
   4919 	}
   4920 
   4921 	kcpuset_create(&affinity, false);
   4922 	intr_idx = 0;
   4923 
   4924 	/*
   4925 	 * TX and RX
   4926 	 */
   4927 	txrx_established = 0;
   4928 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4929 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4930 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4931 
   4932 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4933 		    sizeof(intrbuf));
   4934 #ifdef WM_MPSAFE
   4935 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4936 		    PCI_INTR_MPSAFE, true);
   4937 #endif
   4938 		memset(intr_xname, 0, sizeof(intr_xname));
   4939 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4940 		    device_xname(sc->sc_dev), qidx);
   4941 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4942 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4943 		if (vih == NULL) {
   4944 			aprint_error_dev(sc->sc_dev,
   4945 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4946 			    intrstr ? " at " : "",
   4947 			    intrstr ? intrstr : "");
   4948 
   4949 			goto fail;
   4950 		}
   4951 		kcpuset_zero(affinity);
   4952 		/* Round-robin affinity */
   4953 		kcpuset_set(affinity, affinity_to);
   4954 		error = interrupt_distribute(vih, affinity, NULL);
   4955 		if (error == 0) {
   4956 			aprint_normal_dev(sc->sc_dev,
   4957 			    "for TX and RX interrupting at %s affinity to %u\n",
   4958 			    intrstr, affinity_to);
   4959 		} else {
   4960 			aprint_normal_dev(sc->sc_dev,
   4961 			    "for TX and RX interrupting at %s\n", intrstr);
   4962 		}
   4963 		sc->sc_ihs[intr_idx] = vih;
   4964 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4965 			goto fail;
   4966 		txrx_established++;
   4967 		intr_idx++;
   4968 	}
   4969 
   4970 	/*
   4971 	 * LINK
   4972 	 */
   4973 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4974 	    sizeof(intrbuf));
   4975 #ifdef WM_MPSAFE
   4976 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4977 #endif
   4978 	memset(intr_xname, 0, sizeof(intr_xname));
   4979 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4980 	    device_xname(sc->sc_dev));
   4981 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4982 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4983 	if (vih == NULL) {
   4984 		aprint_error_dev(sc->sc_dev,
   4985 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4986 		    intrstr ? " at " : "",
   4987 		    intrstr ? intrstr : "");
   4988 
   4989 		goto fail;
   4990 	}
   4991 	/* Keep the default affinity for the LINK interrupt. */
   4992 	aprint_normal_dev(sc->sc_dev,
   4993 	    "for LINK interrupting at %s\n", intrstr);
   4994 	sc->sc_ihs[intr_idx] = vih;
   4995 	sc->sc_link_intr_idx = intr_idx;
   4996 
   4997 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4998 	kcpuset_destroy(affinity);
   4999 	return 0;
   5000 
   5001  fail:
   5002 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5003 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5004 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5005 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5006 	}
   5007 
   5008 	kcpuset_destroy(affinity);
   5009 	return ENOMEM;
   5010 }
   5011 
   5012 static void
   5013 wm_turnon(struct wm_softc *sc)
   5014 {
   5015 	int i;
   5016 
   5017 	KASSERT(WM_CORE_LOCKED(sc));
   5018 
   5019 	/*
   5020 	 * Must unset the stopping flags in ascending order.
   5021 	 */
   5022 	for (i = 0; i < sc->sc_nqueues; i++) {
   5023 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5024 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5025 
   5026 		mutex_enter(txq->txq_lock);
   5027 		txq->txq_stopping = false;
   5028 		mutex_exit(txq->txq_lock);
   5029 
   5030 		mutex_enter(rxq->rxq_lock);
   5031 		rxq->rxq_stopping = false;
   5032 		mutex_exit(rxq->rxq_lock);
   5033 	}
   5034 
   5035 	sc->sc_core_stopping = false;
   5036 }
   5037 
   5038 static void
   5039 wm_turnoff(struct wm_softc *sc)
   5040 {
   5041 	int i;
   5042 
   5043 	KASSERT(WM_CORE_LOCKED(sc));
   5044 
   5045 	sc->sc_core_stopping = true;
   5046 
   5047 	/*
   5048 	 * Must set the stopping flags in ascending order.
   5049 	 */
   5050 	for (i = 0; i < sc->sc_nqueues; i++) {
   5051 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5052 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5053 
   5054 		mutex_enter(rxq->rxq_lock);
   5055 		rxq->rxq_stopping = true;
   5056 		mutex_exit(rxq->rxq_lock);
   5057 
   5058 		mutex_enter(txq->txq_lock);
   5059 		txq->txq_stopping = true;
   5060 		mutex_exit(txq->txq_lock);
   5061 	}
   5062 }
   5063 
   5064 /*
   5065  * Write the interrupt interval value to the ITR or EITR register.
   5066  */
   5067 static void
   5068 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5069 {
   5070 
   5071 	if (!wmq->wmq_set_itr)
   5072 		return;
   5073 
   5074 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5075 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5076 
   5077 		/*
   5078 		 * The 82575 doesn't have the CNT_INGR field,
   5079 		 * so overwrite the counter field in software.
   5080 		 */
   5081 		if (sc->sc_type == WM_T_82575)
   5082 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5083 		else
   5084 			eitr |= EITR_CNT_INGR;
   5085 
   5086 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5087 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5088 		/*
   5089 		 * The 82574 has both ITR and EITR. Set EITR when we use
   5090 		 * the multiqueue function with MSI-X.
   5091 		 */
   5092 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5093 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5094 	} else {
   5095 		KASSERT(wmq->wmq_id == 0);
   5096 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5097 	}
   5098 
   5099 	wmq->wmq_set_itr = false;
   5100 }
   5101 
   5102 /*
   5103  * TODO
   5104  * The dynamic ITR calculation below is almost the same as Linux igb's;
   5105  * however, it does not fit wm(4) well, so we keep AIM disabled until we
   5106  * find an appropriate ITR calculation.
   5107  */
   5108 /*
   5109  * Calculate the interrupt interval value to be written to the register
   5110  * in wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5111  */
   5112 static void
   5113 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5114 {
   5115 #ifdef NOTYET
   5116 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5117 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5118 	uint32_t avg_size = 0;
   5119 	uint32_t new_itr;
   5120 
   5121 	if (rxq->rxq_packets)
   5122 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5123 	if (txq->txq_packets)
   5124 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5125 
   5126 	if (avg_size == 0) {
   5127 		new_itr = 450; /* restore default value */
   5128 		goto out;
   5129 	}
   5130 
   5131 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5132 	avg_size += 24;
   5133 
   5134 	/* Don't starve jumbo frames */
   5135 	avg_size = min(avg_size, 3000);
   5136 
   5137 	/* Give a little boost to mid-size frames */
   5138 	if ((avg_size > 300) && (avg_size < 1200))
   5139 		new_itr = avg_size / 3;
   5140 	else
   5141 		new_itr = avg_size / 2;
   5142 
   5143 out:
   5144 	/*
   5145 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
   5146 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5147 	 */
   5148 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5149 		new_itr *= 4;
   5150 
   5151 	if (new_itr != wmq->wmq_itr) {
   5152 		wmq->wmq_itr = new_itr;
   5153 		wmq->wmq_set_itr = true;
   5154 	} else
   5155 		wmq->wmq_set_itr = false;
   5156 
   5157 	rxq->rxq_packets = 0;
   5158 	rxq->rxq_bytes = 0;
   5159 	txq->txq_packets = 0;
   5160 	txq->txq_bytes = 0;
   5161 #endif
   5162 }
   5163 
   5164 /*
   5165  * wm_init:		[ifnet interface function]
   5166  *
   5167  *	Initialize the interface.
   5168  */
   5169 static int
   5170 wm_init(struct ifnet *ifp)
   5171 {
   5172 	struct wm_softc *sc = ifp->if_softc;
   5173 	int ret;
   5174 
   5175 	WM_CORE_LOCK(sc);
   5176 	ret = wm_init_locked(ifp);
   5177 	WM_CORE_UNLOCK(sc);
   5178 
   5179 	return ret;
   5180 }
   5181 
   5182 static int
   5183 wm_init_locked(struct ifnet *ifp)
   5184 {
   5185 	struct wm_softc *sc = ifp->if_softc;
   5186 	int i, j, trynum, error = 0;
   5187 	uint32_t reg;
   5188 
   5189 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5190 		device_xname(sc->sc_dev), __func__));
   5191 	KASSERT(WM_CORE_LOCKED(sc));
   5192 
   5193 	/*
   5194 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
   5195 	 * There is a small but measurable benefit to avoiding the adjustment
   5196 	 * of the descriptor so that the headers are aligned, for normal MTU,
   5197 	 * on such platforms.  One possibility is that the DMA itself is
   5198 	 * slightly more efficient if the front of the entire packet (instead
   5199 	 * of the front of the headers) is aligned.
   5200 	 *
   5201 	 * Note we must always set align_tweak to 0 if we are using
   5202 	 * jumbo frames.
   5203 	 */
   5204 #ifdef __NO_STRICT_ALIGNMENT
   5205 	sc->sc_align_tweak = 0;
   5206 #else
   5207 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5208 		sc->sc_align_tweak = 0;
   5209 	else
   5210 		sc->sc_align_tweak = 2;
   5211 #endif /* __NO_STRICT_ALIGNMENT */
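        	/*
        	 * With sc_align_tweak == 2, the 14-byte Ethernet header ends on
        	 * a 4-byte boundary, so the IP header that follows it is
        	 * naturally aligned on strict-alignment platforms.
        	 */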
   5212 
   5213 	/* Cancel any pending I/O. */
   5214 	wm_stop_locked(ifp, 0);
   5215 
   5216 	/* update statistics before reset */
   5217 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5218 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5219 
   5220 	/* PCH_SPT hardware workaround */
   5221 	if (sc->sc_type == WM_T_PCH_SPT)
   5222 		wm_flush_desc_rings(sc);
   5223 
   5224 	/* Reset the chip to a known state. */
   5225 	wm_reset(sc);
   5226 
   5227 	/*
   5228 	 * AMT based hardware can now take control from firmware
   5229 	 * Do this after reset.
   5230 	 */
   5231 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5232 		wm_get_hw_control(sc);
   5233 
   5234 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5235 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5236 		wm_legacy_irq_quirk_spt(sc);
   5237 
   5238 	/* Init hardware bits */
   5239 	wm_initialize_hardware_bits(sc);
   5240 
   5241 	/* Reset the PHY. */
   5242 	if (sc->sc_flags & WM_F_HAS_MII)
   5243 		wm_gmii_reset(sc);
   5244 
   5245 	/* Calculate (E)ITR value */
   5246 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5247 		/*
   5248 		 * For NEWQUEUE's EITR (except for 82575).
   5249 		 * The 82575's EITR should be set to the same throttling value as
   5250 		 * the other old controllers' ITR because the interrupt/sec
   5251 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
   5252 		 *
   5253 		 * The 82574's EITR should be set to the same throttling value as ITR.
   5254 		 *
   5255 		 * For N interrupts/sec, set this value to:
   5256 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5257 		 */
   5258 		sc->sc_itr_init = 450;
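        		/* By the formula above, 450 gives about 2222 ints/sec. */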
   5259 	} else if (sc->sc_type >= WM_T_82543) {
   5260 		/*
   5261 		 * Set up the interrupt throttling register (units of 256ns)
   5262 		 * Note that a footnote in Intel's documentation says this
   5263 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5264 		 * or 10Mbit mode.  Empirically, it appears that this is also
   5265 		 * true for the 1024ns units of the other
   5266 		 * interrupt-related timer registers -- so, really, we ought
   5267 		 * to divide this value by 4 when the link speed is low.
   5268 		 *
   5269 		 * XXX implement this division at link speed change!
   5270 		 */
   5271 
   5272 		/*
   5273 		 * For N interrupts/sec, set this value to:
   5274 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5275 		 * absolute and packet timer values to this value
   5276 		 * divided by 4 to get "simple timer" behavior.
   5277 		 */
   5278 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5279 	}
   5280 
   5281 	error = wm_init_txrx_queues(sc);
   5282 	if (error)
   5283 		goto out;
   5284 
   5285 	/*
   5286 	 * Clear out the VLAN table -- we don't use it (yet).
   5287 	 */
   5288 	CSR_WRITE(sc, WMREG_VET, 0);
   5289 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5290 		trynum = 10; /* Due to hw errata */
   5291 	else
   5292 		trynum = 1;
   5293 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5294 		for (j = 0; j < trynum; j++)
   5295 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5296 
   5297 	/*
   5298 	 * Set up flow-control parameters.
   5299 	 *
   5300 	 * XXX Values could probably stand some tuning.
   5301 	 */
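        	/*
        	 * FCAL/FCAH program the 802.3x PAUSE multicast address
        	 * (01:80:c2:00:00:01) and FCT the PAUSE Ethertype (0x8808).
        	 */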
   5302 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5303 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5304 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5305 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5306 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5307 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5308 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5309 	}
   5310 
   5311 	sc->sc_fcrtl = FCRTL_DFLT;
   5312 	if (sc->sc_type < WM_T_82543) {
   5313 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5314 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5315 	} else {
   5316 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5317 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5318 	}
   5319 
   5320 	if (sc->sc_type == WM_T_80003)
   5321 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5322 	else
   5323 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5324 
   5325 	/* Writes the control register. */
   5326 	wm_set_vlan(sc);
   5327 
   5328 	if (sc->sc_flags & WM_F_HAS_MII) {
   5329 		int val;
   5330 
   5331 		switch (sc->sc_type) {
   5332 		case WM_T_80003:
   5333 		case WM_T_ICH8:
   5334 		case WM_T_ICH9:
   5335 		case WM_T_ICH10:
   5336 		case WM_T_PCH:
   5337 		case WM_T_PCH2:
   5338 		case WM_T_PCH_LPT:
   5339 		case WM_T_PCH_SPT:
   5340 			/*
   5341 			 * Set the mac to wait the maximum time between each
   5342 			 * iteration and increase the max iterations when
   5343 			 * polling the phy; this fixes erroneous timeouts at
   5344 			 * 10Mbps.
   5345 			 */
   5346 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5347 			    0xFFFF);
   5348 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5349 			val |= 0x3F;
   5350 			wm_kmrn_writereg(sc,
   5351 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5352 			break;
   5353 		default:
   5354 			break;
   5355 		}
   5356 
   5357 		if (sc->sc_type == WM_T_80003) {
   5358 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5359 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5360 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5361 
   5362 			/* Bypass RX and TX FIFO's */
   5363 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5364 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5365 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5366 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5367 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5368 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5369 		}
   5370 	}
   5371 #if 0
   5372 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5373 #endif
   5374 
   5375 	/* Set up checksum offload parameters. */
   5376 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5377 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5378 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5379 		reg |= RXCSUM_IPOFL;
   5380 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5381 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5382 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5383 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5384 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5385 
   5386 	/* Set registers about MSI-X */
   5387 	if (wm_is_using_msix(sc)) {
   5388 		uint32_t ivar;
   5389 		struct wm_queue *wmq;
   5390 		int qid, qintr_idx;
   5391 
   5392 		if (sc->sc_type == WM_T_82575) {
   5393 			/* Interrupt control */
   5394 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5395 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5396 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5397 
   5398 			/* TX and RX */
   5399 			for (i = 0; i < sc->sc_nqueues; i++) {
   5400 				wmq = &sc->sc_queue[i];
   5401 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5402 				    EITR_TX_QUEUE(wmq->wmq_id)
   5403 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5404 			}
   5405 			/* Link status */
   5406 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5407 			    EITR_OTHER);
   5408 		} else if (sc->sc_type == WM_T_82574) {
   5409 			/* Interrupt control */
   5410 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5411 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5412 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5413 
   5414 			/*
   5415 			 * Work around an issue with spurious interrupts
   5416 			 * in MSI-X mode.
   5417 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
   5418 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5419 			 */
   5420 			reg = CSR_READ(sc, WMREG_RFCTL);
   5421 			reg |= WMREG_RFCTL_ACKDIS;
   5422 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5423 
   5424 			ivar = 0;
   5425 			/* TX and RX */
   5426 			for (i = 0; i < sc->sc_nqueues; i++) {
   5427 				wmq = &sc->sc_queue[i];
   5428 				qid = wmq->wmq_id;
   5429 				qintr_idx = wmq->wmq_intr_idx;
   5430 
   5431 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5432 				    IVAR_TX_MASK_Q_82574(qid));
   5433 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5434 				    IVAR_RX_MASK_Q_82574(qid));
   5435 			}
   5436 			/* Link status */
   5437 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5438 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5439 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5440 		} else {
   5441 			/* Interrupt control */
   5442 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5443 			    | GPIE_EIAME | GPIE_PBA);
   5444 
   5445 			switch (sc->sc_type) {
   5446 			case WM_T_82580:
   5447 			case WM_T_I350:
   5448 			case WM_T_I354:
   5449 			case WM_T_I210:
   5450 			case WM_T_I211:
   5451 				/* TX and RX */
   5452 				for (i = 0; i < sc->sc_nqueues; i++) {
   5453 					wmq = &sc->sc_queue[i];
   5454 					qid = wmq->wmq_id;
   5455 					qintr_idx = wmq->wmq_intr_idx;
   5456 
   5457 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5458 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5459 					ivar |= __SHIFTIN((qintr_idx
   5460 						| IVAR_VALID),
   5461 					    IVAR_TX_MASK_Q(qid));
   5462 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5463 					ivar |= __SHIFTIN((qintr_idx
   5464 						| IVAR_VALID),
   5465 					    IVAR_RX_MASK_Q(qid));
   5466 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5467 				}
   5468 				break;
   5469 			case WM_T_82576:
   5470 				/* TX and RX */
   5471 				for (i = 0; i < sc->sc_nqueues; i++) {
   5472 					wmq = &sc->sc_queue[i];
   5473 					qid = wmq->wmq_id;
   5474 					qintr_idx = wmq->wmq_intr_idx;
   5475 
   5476 					ivar = CSR_READ(sc,
   5477 					    WMREG_IVAR_Q_82576(qid));
   5478 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5479 					ivar |= __SHIFTIN((qintr_idx
   5480 						| IVAR_VALID),
   5481 					    IVAR_TX_MASK_Q_82576(qid));
   5482 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5483 					ivar |= __SHIFTIN((qintr_idx
   5484 						| IVAR_VALID),
   5485 					    IVAR_RX_MASK_Q_82576(qid));
   5486 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5487 					    ivar);
   5488 				}
   5489 				break;
   5490 			default:
   5491 				break;
   5492 			}
   5493 
   5494 			/* Link status */
   5495 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5496 			    IVAR_MISC_OTHER);
   5497 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5498 		}
   5499 
   5500 		if (wm_is_using_multiqueue(sc)) {
   5501 			wm_init_rss(sc);
   5502 
   5503 			/*
   5504 			 * NOTE: Receive Full-Packet Checksum Offload is
   5505 			 * mutually exclusive with Multiqueue. However, this
   5506 			 * is not the same as the TCP/IP checksums, which
   5507 			 * still work.
   5508 			 */
   5509 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5510 			reg |= RXCSUM_PCSD;
   5511 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5512 		}
   5513 	}
   5514 
   5515 	/* Set up the interrupt registers. */
   5516 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5517 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5518 	    ICR_RXO | ICR_RXT0;
   5519 	if (wm_is_using_msix(sc)) {
   5520 		uint32_t mask;
   5521 		struct wm_queue *wmq;
   5522 
   5523 		switch (sc->sc_type) {
   5524 		case WM_T_82574:
   5525 			mask = 0;
   5526 			for (i = 0; i < sc->sc_nqueues; i++) {
   5527 				wmq = &sc->sc_queue[i];
   5528 				mask |= ICR_TXQ(wmq->wmq_id);
   5529 				mask |= ICR_RXQ(wmq->wmq_id);
   5530 			}
   5531 			mask |= ICR_OTHER;
   5532 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5533 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5534 			break;
   5535 		default:
   5536 			if (sc->sc_type == WM_T_82575) {
   5537 				mask = 0;
   5538 				for (i = 0; i < sc->sc_nqueues; i++) {
   5539 					wmq = &sc->sc_queue[i];
   5540 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5541 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5542 				}
   5543 				mask |= EITR_OTHER;
   5544 			} else {
   5545 				mask = 0;
   5546 				for (i = 0; i < sc->sc_nqueues; i++) {
   5547 					wmq = &sc->sc_queue[i];
   5548 					mask |= 1 << wmq->wmq_intr_idx;
   5549 				}
   5550 				mask |= 1 << sc->sc_link_intr_idx;
   5551 			}
   5552 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5553 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5554 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5555 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5556 			break;
   5557 		}
   5558 	} else
   5559 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5560 
   5561 	/* Set up the inter-packet gap. */
   5562 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5563 
   5564 	if (sc->sc_type >= WM_T_82543) {
   5565 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5566 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5567 			wm_itrs_writereg(sc, wmq);
   5568 		}
   5569 		/*
   5570 		 * Link interrupts occur much less frequently than TX
   5571 		 * and RX interrupts, so we don't tune the
   5572 		 * EITR(WM_MSIX_LINKINTR_IDX) value as
   5573 		 * FreeBSD's if_igb does.
   5574 		 */
   5575 	}
   5576 
   5577 	/* Set the VLAN ethernetype. */
   5578 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5579 
   5580 	/*
   5581 	 * Set up the transmit control register; we start out with
   5582 	 * a collision distance suitable for FDX, but update it when
   5583 	 * we resolve the media type.
   5584 	 */
   5585 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5586 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5587 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5588 	if (sc->sc_type >= WM_T_82571)
   5589 		sc->sc_tctl |= TCTL_MULR;
   5590 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5591 
   5592 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5593 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5594 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5595 	}
   5596 
   5597 	if (sc->sc_type == WM_T_80003) {
   5598 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5599 		reg &= ~TCTL_EXT_GCEX_MASK;
   5600 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5601 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5602 	}
   5603 
   5604 	/* Set the media. */
   5605 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5606 		goto out;
   5607 
   5608 	/* Configure for OS presence */
   5609 	wm_init_manageability(sc);
   5610 
   5611 	/*
   5612 	 * Set up the receive control register; we actually program
   5613 	 * the register when we set the receive filter.  Use multicast
   5614 	 * address offset type 0.
   5615 	 *
   5616 	 * Only the i82544 has the ability to strip the incoming
   5617 	 * CRC, so we don't enable that feature.
   5618 	 */
   5619 	sc->sc_mchash_type = 0;
   5620 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5621 	    | RCTL_MO(sc->sc_mchash_type);
   5622 
   5623 	/*
   5624 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5625 	 */
   5626 	if (sc->sc_type == WM_T_82574)
   5627 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5628 
   5629 	/*
   5630 	 * The I350 has a bug where it always strips the CRC whether
   5631 	 * asked to or not. So ask for stripped CRC here and cope in rxeof().
   5632 	 */
   5633 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5634 	    || (sc->sc_type == WM_T_I210))
   5635 		sc->sc_rctl |= RCTL_SECRC;
   5636 
   5637 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5638 	    && (ifp->if_mtu > ETHERMTU)) {
   5639 		sc->sc_rctl |= RCTL_LPE;
   5640 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5641 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5642 	}
   5643 
   5644 	if (MCLBYTES == 2048) {
   5645 		sc->sc_rctl |= RCTL_2k;
   5646 	} else {
   5647 		if (sc->sc_type >= WM_T_82543) {
   5648 			switch (MCLBYTES) {
   5649 			case 4096:
   5650 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5651 				break;
   5652 			case 8192:
   5653 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5654 				break;
   5655 			case 16384:
   5656 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5657 				break;
   5658 			default:
   5659 				panic("wm_init: MCLBYTES %d unsupported",
   5660 				    MCLBYTES);
   5661 				break;
   5662 			}
   5663 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5664 	}
   5665 
   5666 	/* Enable ECC */
   5667 	switch (sc->sc_type) {
   5668 	case WM_T_82571:
   5669 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5670 		reg |= PBA_ECC_CORR_EN;
   5671 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5672 		break;
   5673 	case WM_T_PCH_LPT:
   5674 	case WM_T_PCH_SPT:
   5675 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5676 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5677 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5678 
   5679 		sc->sc_ctrl |= CTRL_MEHE;
   5680 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5681 		break;
   5682 	default:
   5683 		break;
   5684 	}
   5685 
   5686 	/* On 575 and later set RDT only if RX enabled */
   5687 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5688 		int qidx;
   5689 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5690 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5691 			for (i = 0; i < WM_NRXDESC; i++) {
   5692 				mutex_enter(rxq->rxq_lock);
   5693 				wm_init_rxdesc(rxq, i);
   5694 				mutex_exit(rxq->rxq_lock);
   5695 
   5696 			}
   5697 		}
   5698 	}
   5699 
   5700 	/* Set the receive filter. */
   5701 	wm_set_filter(sc);
   5702 
   5703 	wm_turnon(sc);
   5704 
   5705 	/* Start the one second link check clock. */
   5706 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5707 
   5708 	/* ...all done! */
   5709 	ifp->if_flags |= IFF_RUNNING;
   5710 	ifp->if_flags &= ~IFF_OACTIVE;
   5711 
   5712  out:
   5713 	sc->sc_if_flags = ifp->if_flags;
   5714 	if (error)
   5715 		log(LOG_ERR, "%s: interface not running\n",
   5716 		    device_xname(sc->sc_dev));
   5717 	return error;
   5718 }
   5719 
   5720 /*
   5721  * wm_stop:		[ifnet interface function]
   5722  *
   5723  *	Stop transmission on the interface.
   5724  */
   5725 static void
   5726 wm_stop(struct ifnet *ifp, int disable)
   5727 {
   5728 	struct wm_softc *sc = ifp->if_softc;
   5729 
   5730 	WM_CORE_LOCK(sc);
   5731 	wm_stop_locked(ifp, disable);
   5732 	WM_CORE_UNLOCK(sc);
   5733 }
   5734 
   5735 static void
   5736 wm_stop_locked(struct ifnet *ifp, int disable)
   5737 {
   5738 	struct wm_softc *sc = ifp->if_softc;
   5739 	struct wm_txsoft *txs;
   5740 	int i, qidx;
   5741 
   5742 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5743 		device_xname(sc->sc_dev), __func__));
   5744 	KASSERT(WM_CORE_LOCKED(sc));
   5745 
   5746 	wm_turnoff(sc);
   5747 
   5748 	/* Stop the one second clock. */
   5749 	callout_stop(&sc->sc_tick_ch);
   5750 
   5751 	/* Stop the 82547 Tx FIFO stall check timer. */
   5752 	if (sc->sc_type == WM_T_82547)
   5753 		callout_stop(&sc->sc_txfifo_ch);
   5754 
   5755 	if (sc->sc_flags & WM_F_HAS_MII) {
   5756 		/* Down the MII. */
   5757 		mii_down(&sc->sc_mii);
   5758 	} else {
   5759 #if 0
   5760 		/* Should we clear PHY's status properly? */
   5761 		wm_reset(sc);
   5762 #endif
   5763 	}
   5764 
   5765 	/* Stop the transmit and receive processes. */
   5766 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5767 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5768 	sc->sc_rctl &= ~RCTL_EN;
   5769 
   5770 	/*
   5771 	 * Clear the interrupt mask to ensure the device cannot assert its
   5772 	 * interrupt line.
   5773 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5774 	 * service any currently pending or shared interrupt.
   5775 	 */
   5776 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5777 	sc->sc_icr = 0;
   5778 	if (wm_is_using_msix(sc)) {
   5779 		if (sc->sc_type != WM_T_82574) {
   5780 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5781 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5782 		} else
   5783 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5784 	}
   5785 
   5786 	/* Release any queued transmit buffers. */
   5787 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5788 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5789 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5790 		mutex_enter(txq->txq_lock);
   5791 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5792 			txs = &txq->txq_soft[i];
   5793 			if (txs->txs_mbuf != NULL) {
   5794 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5795 				m_freem(txs->txs_mbuf);
   5796 				txs->txs_mbuf = NULL;
   5797 			}
   5798 		}
   5799 		mutex_exit(txq->txq_lock);
   5800 	}
   5801 
   5802 	/* Mark the interface as down and cancel the watchdog timer. */
   5803 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5804 	ifp->if_timer = 0;
   5805 
   5806 	if (disable) {
   5807 		for (i = 0; i < sc->sc_nqueues; i++) {
   5808 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5809 			mutex_enter(rxq->rxq_lock);
   5810 			wm_rxdrain(rxq);
   5811 			mutex_exit(rxq->rxq_lock);
   5812 		}
   5813 	}
   5814 
   5815 #if 0 /* notyet */
   5816 	if (sc->sc_type >= WM_T_82544)
   5817 		CSR_WRITE(sc, WMREG_WUC, 0);
   5818 #endif
   5819 }
   5820 
   5821 static void
   5822 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5823 {
   5824 	struct mbuf *m;
   5825 	int i;
   5826 
   5827 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5828 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5829 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5830 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5831 		    m->m_data, m->m_len, m->m_flags);
   5832 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5833 	    i, i == 1 ? "" : "s");
   5834 }
   5835 
   5836 /*
   5837  * wm_82547_txfifo_stall:
   5838  *
   5839  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5840  *	reset the FIFO pointers, and restart packet transmission.
   5841  */
   5842 static void
   5843 wm_82547_txfifo_stall(void *arg)
   5844 {
   5845 	struct wm_softc *sc = arg;
   5846 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5847 
   5848 	mutex_enter(txq->txq_lock);
   5849 
   5850 	if (txq->txq_stopping)
   5851 		goto out;
   5852 
   5853 	if (txq->txq_fifo_stall) {
   5854 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5855 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5856 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5857 			/*
   5858 			 * Packets have drained.  Stop transmitter, reset
   5859 			 * FIFO pointers, restart transmitter, and kick
   5860 			 * the packet queue.
   5861 			 */
   5862 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5863 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5864 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5865 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5866 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5867 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5868 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5869 			CSR_WRITE_FLUSH(sc);
   5870 
   5871 			txq->txq_fifo_head = 0;
   5872 			txq->txq_fifo_stall = 0;
   5873 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5874 		} else {
   5875 			/*
   5876 			 * Still waiting for packets to drain; try again in
   5877 			 * another tick.
   5878 			 */
   5879 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5880 		}
   5881 	}
   5882 
   5883 out:
   5884 	mutex_exit(txq->txq_lock);
   5885 }
   5886 
   5887 /*
   5888  * wm_82547_txfifo_bugchk:
   5889  *
   5890  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5891  *	prevent enqueueing a packet that would wrap around the end
   5892  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   5893  *
   5894  *	We do this by checking the amount of space before the end
   5895  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5896  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5897  *	the internal FIFO pointers to the beginning, and restart
   5898  *	transmission on the interface.
   5899  */
   5900 #define	WM_FIFO_HDR		0x10
   5901 #define	WM_82547_PAD_LEN	0x3e0
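        /*
         * Worked example of the check below: with space = 0x400 bytes left
         * before the end of the FIFO, a stall is triggered once the padded
         * packet length reaches WM_82547_PAD_LEN + 0x400 = 0x7e0 bytes
         * (half-duplex only).
         */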
   5902 static int
   5903 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5904 {
   5905 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5906 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5907 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5908 
   5909 	/* Just return if already stalled. */
   5910 	if (txq->txq_fifo_stall)
   5911 		return 1;
   5912 
   5913 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5914 		/* Stall only occurs in half-duplex mode. */
   5915 		goto send_packet;
   5916 	}
   5917 
   5918 	if (len >= WM_82547_PAD_LEN + space) {
   5919 		txq->txq_fifo_stall = 1;
   5920 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5921 		return 1;
   5922 	}
   5923 
   5924  send_packet:
   5925 	txq->txq_fifo_head += len;
   5926 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5927 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5928 
   5929 	return 0;
   5930 }
   5931 
   5932 static int
   5933 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5934 {
   5935 	int error;
   5936 
   5937 	/*
   5938 	 * Allocate the control data structures, and create and load the
   5939 	 * DMA map for it.
   5940 	 *
   5941 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5942 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5943 	 * both sets within the same 4G segment.
   5944 	 */
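        	/*
        	 * The 4G constraint is enforced by the 0x100000000ULL boundary
        	 * argument passed to bus_dmamem_alloc() below.
        	 */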
   5945 	if (sc->sc_type < WM_T_82544)
   5946 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5947 	else
   5948 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5949 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5950 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5951 	else
   5952 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5953 
   5954 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5955 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5956 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5957 		aprint_error_dev(sc->sc_dev,
   5958 		    "unable to allocate TX control data, error = %d\n",
   5959 		    error);
   5960 		goto fail_0;
   5961 	}
   5962 
   5963 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5964 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5965 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5966 		aprint_error_dev(sc->sc_dev,
   5967 		    "unable to map TX control data, error = %d\n", error);
   5968 		goto fail_1;
   5969 	}
   5970 
   5971 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5972 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5973 		aprint_error_dev(sc->sc_dev,
   5974 		    "unable to create TX control data DMA map, error = %d\n",
   5975 		    error);
   5976 		goto fail_2;
   5977 	}
   5978 
   5979 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5980 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5981 		aprint_error_dev(sc->sc_dev,
   5982 		    "unable to load TX control data DMA map, error = %d\n",
   5983 		    error);
   5984 		goto fail_3;
   5985 	}
   5986 
   5987 	return 0;
   5988 
   5989  fail_3:
   5990 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5991  fail_2:
   5992 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5993 	    WM_TXDESCS_SIZE(txq));
   5994  fail_1:
   5995 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5996  fail_0:
   5997 	return error;
   5998 }
   5999 
   6000 static void
   6001 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6002 {
   6003 
   6004 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6005 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6006 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6007 	    WM_TXDESCS_SIZE(txq));
   6008 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6009 }
   6010 
   6011 static int
   6012 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6013 {
   6014 	int error;
   6015 	size_t rxq_descs_size;
   6016 
   6017 	/*
   6018 	 * Allocate the control data structures, and create and load the
   6019 	 * DMA map for it.
   6020 	 *
   6021 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6022 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6023 	 * both sets within the same 4G segment.
   6024 	 */
   6025 	rxq->rxq_ndesc = WM_NRXDESC;
   6026 	if (sc->sc_type == WM_T_82574)
   6027 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6028 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6029 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6030 	else
   6031 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6032 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6033 
   6034 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6035 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6036 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6037 		aprint_error_dev(sc->sc_dev,
   6038 		    "unable to allocate RX control data, error = %d\n",
   6039 		    error);
   6040 		goto fail_0;
   6041 	}
   6042 
   6043 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6044 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6045 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6046 		aprint_error_dev(sc->sc_dev,
   6047 		    "unable to map RX control data, error = %d\n", error);
   6048 		goto fail_1;
   6049 	}
   6050 
   6051 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6052 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6053 		aprint_error_dev(sc->sc_dev,
   6054 		    "unable to create RX control data DMA map, error = %d\n",
   6055 		    error);
   6056 		goto fail_2;
   6057 	}
   6058 
   6059 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6060 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6061 		aprint_error_dev(sc->sc_dev,
   6062 		    "unable to load RX control data DMA map, error = %d\n",
   6063 		    error);
   6064 		goto fail_3;
   6065 	}
   6066 
   6067 	return 0;
   6068 
   6069  fail_3:
   6070 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6071  fail_2:
   6072 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6073 	    rxq_descs_size);
   6074  fail_1:
   6075 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6076  fail_0:
   6077 	return error;
   6078 }
   6079 
   6080 static void
   6081 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6082 {
   6083 
   6084 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6085 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6086 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6087 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6088 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6089 }
   6090 
   6091 
   6092 static int
   6093 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6094 {
   6095 	int i, error;
   6096 
   6097 	/* Create the transmit buffer DMA maps. */
   6098 	WM_TXQUEUELEN(txq) =
   6099 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6100 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6101 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6102 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6103 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6104 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6105 			aprint_error_dev(sc->sc_dev,
   6106 			    "unable to create Tx DMA map %d, error = %d\n",
   6107 			    i, error);
   6108 			goto fail;
   6109 		}
   6110 	}
   6111 
   6112 	return 0;
   6113 
   6114  fail:
   6115 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6116 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6117 			bus_dmamap_destroy(sc->sc_dmat,
   6118 			    txq->txq_soft[i].txs_dmamap);
   6119 	}
   6120 	return error;
   6121 }
   6122 
   6123 static void
   6124 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6125 {
   6126 	int i;
   6127 
   6128 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6129 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6130 			bus_dmamap_destroy(sc->sc_dmat,
   6131 			    txq->txq_soft[i].txs_dmamap);
   6132 	}
   6133 }
   6134 
   6135 static int
   6136 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6137 {
   6138 	int i, error;
   6139 
   6140 	/* Create the receive buffer DMA maps. */
   6141 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6142 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6143 			    MCLBYTES, 0, 0,
   6144 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6145 			aprint_error_dev(sc->sc_dev,
   6146 			    "unable to create Rx DMA map %d error = %d\n",
   6147 			    i, error);
   6148 			goto fail;
   6149 		}
   6150 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6151 	}
   6152 
   6153 	return 0;
   6154 
   6155  fail:
   6156 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6157 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6158 			bus_dmamap_destroy(sc->sc_dmat,
   6159 			    rxq->rxq_soft[i].rxs_dmamap);
   6160 	}
   6161 	return error;
   6162 }
   6163 
   6164 static void
   6165 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6166 {
   6167 	int i;
   6168 
   6169 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6170 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6171 			bus_dmamap_destroy(sc->sc_dmat,
   6172 			    rxq->rxq_soft[i].rxs_dmamap);
   6173 	}
   6174 }
   6175 
   6176 /*
   6177  * wm_alloc_txrx_queues:
   6178  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6179  */
   6180 static int
   6181 wm_alloc_txrx_queues(struct wm_softc *sc)
   6182 {
   6183 	int i, error, tx_done, rx_done;
   6184 
   6185 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6186 	    KM_SLEEP);
   6187 	if (sc->sc_queue == NULL) {
   6188 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6189 		error = ENOMEM;
   6190 		goto fail_0;
   6191 	}
   6192 
   6193 	/*
   6194 	 * For transmission
   6195 	 */
   6196 	error = 0;
   6197 	tx_done = 0;
   6198 	for (i = 0; i < sc->sc_nqueues; i++) {
   6199 #ifdef WM_EVENT_COUNTERS
   6200 		int j;
   6201 		const char *xname;
   6202 #endif
   6203 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6204 		txq->txq_sc = sc;
   6205 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6206 
   6207 		error = wm_alloc_tx_descs(sc, txq);
   6208 		if (error)
   6209 			break;
   6210 		error = wm_alloc_tx_buffer(sc, txq);
   6211 		if (error) {
   6212 			wm_free_tx_descs(sc, txq);
   6213 			break;
   6214 		}
   6215 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6216 		if (txq->txq_interq == NULL) {
   6217 			wm_free_tx_descs(sc, txq);
   6218 			wm_free_tx_buffer(sc, txq);
   6219 			error = ENOMEM;
   6220 			break;
   6221 		}
   6222 
   6223 #ifdef WM_EVENT_COUNTERS
   6224 		xname = device_xname(sc->sc_dev);
   6225 
   6226 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6227 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6228 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6229 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6230 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6231 
   6232 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6233 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6234 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6235 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6236 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6237 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6238 
   6239 		for (j = 0; j < WM_NTXSEGS; j++) {
   6240 			snprintf(txq->txq_txseg_evcnt_names[j],
   6241 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6242 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6243 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6244 		}
   6245 
   6246 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6247 
   6248 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6249 #endif /* WM_EVENT_COUNTERS */
   6250 
   6251 		tx_done++;
   6252 	}
   6253 	if (error)
   6254 		goto fail_1;
   6255 
   6256 	/*
   6257 	 * For receive
   6258 	 */
   6259 	error = 0;
   6260 	rx_done = 0;
   6261 	for (i = 0; i < sc->sc_nqueues; i++) {
   6262 #ifdef WM_EVENT_COUNTERS
   6263 		const char *xname;
   6264 #endif
   6265 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6266 		rxq->rxq_sc = sc;
   6267 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6268 
   6269 		error = wm_alloc_rx_descs(sc, rxq);
   6270 		if (error)
   6271 			break;
   6272 
   6273 		error = wm_alloc_rx_buffer(sc, rxq);
   6274 		if (error) {
   6275 			wm_free_rx_descs(sc, rxq);
   6276 			break;
   6277 		}
   6278 
   6279 #ifdef WM_EVENT_COUNTERS
   6280 		xname = device_xname(sc->sc_dev);
   6281 
   6282 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6283 
   6284 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6285 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6286 #endif /* WM_EVENT_COUNTERS */
   6287 
   6288 		rx_done++;
   6289 	}
   6290 	if (error)
   6291 		goto fail_2;
   6292 
   6293 	return 0;
   6294 
   6295  fail_2:
   6296 	for (i = 0; i < rx_done; i++) {
   6297 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6298 		wm_free_rx_buffer(sc, rxq);
   6299 		wm_free_rx_descs(sc, rxq);
   6300 		if (rxq->rxq_lock)
   6301 			mutex_obj_free(rxq->rxq_lock);
   6302 	}
   6303  fail_1:
   6304 	for (i = 0; i < tx_done; i++) {
   6305 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6306 		pcq_destroy(txq->txq_interq);
   6307 		wm_free_tx_buffer(sc, txq);
   6308 		wm_free_tx_descs(sc, txq);
   6309 		if (txq->txq_lock)
   6310 			mutex_obj_free(txq->txq_lock);
   6311 	}
   6312 
   6313 	kmem_free(sc->sc_queue,
   6314 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6315  fail_0:
   6316 	return error;
   6317 }
   6318 
   6319 /*
   6320  * wm_free_txrx_queues:
   6321  *	Free {Tx,Rx} descriptors and {Tx,Rx} buffers.
   6322  */
   6323 static void
   6324 wm_free_txrx_queues(struct wm_softc *sc)
   6325 {
   6326 	int i;
   6327 
   6328 	for (i = 0; i < sc->sc_nqueues; i++) {
   6329 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6330 
   6331 #ifdef WM_EVENT_COUNTERS
   6332 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6333 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6334 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6335 #endif /* WM_EVENT_COUNTERS */
   6336 
   6337 		wm_free_rx_buffer(sc, rxq);
   6338 		wm_free_rx_descs(sc, rxq);
   6339 		if (rxq->rxq_lock)
   6340 			mutex_obj_free(rxq->rxq_lock);
   6341 	}
   6342 
   6343 	for (i = 0; i < sc->sc_nqueues; i++) {
   6344 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6345 		struct mbuf *m;
   6346 #ifdef WM_EVENT_COUNTERS
   6347 		int j;
   6348 
   6349 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6350 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6351 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6352 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6353 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6354 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6355 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6356 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6357 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6358 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6359 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6360 
   6361 		for (j = 0; j < WM_NTXSEGS; j++)
   6362 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6363 
   6364 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6365 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6366 #endif /* WM_EVENT_COUNTERS */
   6367 
   6368 		/* drain txq_interq */
   6369 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6370 			m_freem(m);
   6371 		pcq_destroy(txq->txq_interq);
   6372 
   6373 		wm_free_tx_buffer(sc, txq);
   6374 		wm_free_tx_descs(sc, txq);
   6375 		if (txq->txq_lock)
   6376 			mutex_obj_free(txq->txq_lock);
   6377 	}
   6378 
   6379 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6380 }
   6381 
   6382 static void
   6383 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6384 {
   6385 
   6386 	KASSERT(mutex_owned(txq->txq_lock));
   6387 
   6388 	/* Initialize the transmit descriptor ring. */
   6389 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6390 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6391 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6392 	txq->txq_free = WM_NTXDESC(txq);
   6393 	txq->txq_next = 0;
   6394 }
   6395 
   6396 static void
   6397 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6398     struct wm_txqueue *txq)
   6399 {
   6400 
   6401 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6402 		device_xname(sc->sc_dev), __func__));
   6403 	KASSERT(mutex_owned(txq->txq_lock));
   6404 
   6405 	if (sc->sc_type < WM_T_82543) {
   6406 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6407 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6408 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6409 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6410 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6411 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6412 	} else {
   6413 		int qid = wmq->wmq_id;
   6414 
   6415 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6416 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6417 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6418 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6419 
   6420 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6421 			/*
   6422 			 * Don't write TDT before TCTL.EN is set.
   6423 			 * See the documentation.
   6424 			 */
   6425 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6426 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6427 			    | TXDCTL_WTHRESH(0));
   6428 		else {
   6429 			/* XXX should update with AIM? */
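			/*
			 * The division by four below is presumably a unit
			 * conversion: ITR-style values count in 256 ns steps
			 * while TIDV/TADV count in 1.024 us steps (an
			 * assumption based on i8254x-era datasheets).
			 */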
   6430 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6431 			if (sc->sc_type >= WM_T_82540) {
   6432 				/* TADV should hold the same value as TIDV */
   6433 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6434 			}
   6435 
   6436 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6437 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6438 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6439 		}
   6440 	}
   6441 }
   6442 
   6443 static void
   6444 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6445 {
   6446 	int i;
   6447 
   6448 	KASSERT(mutex_owned(txq->txq_lock));
   6449 
   6450 	/* Initialize the transmit job descriptors. */
   6451 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6452 		txq->txq_soft[i].txs_mbuf = NULL;
   6453 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6454 	txq->txq_snext = 0;
   6455 	txq->txq_sdirty = 0;
   6456 }
   6457 
   6458 static void
   6459 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6460     struct wm_txqueue *txq)
   6461 {
   6462 
   6463 	KASSERT(mutex_owned(txq->txq_lock));
   6464 
   6465 	/*
   6466 	 * Set up some register offsets that are different between
   6467 	 * the i82542 and the i82543 and later chips.
   6468 	 */
   6469 	if (sc->sc_type < WM_T_82543)
   6470 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6471 	else
   6472 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6473 
   6474 	wm_init_tx_descs(sc, txq);
   6475 	wm_init_tx_regs(sc, wmq, txq);
   6476 	wm_init_tx_buffer(sc, txq);
   6477 }
   6478 
   6479 static void
   6480 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6481     struct wm_rxqueue *rxq)
   6482 {
   6483 
   6484 	KASSERT(mutex_owned(rxq->rxq_lock));
   6485 
   6486 	/*
   6487 	 * Initialize the receive descriptor and receive job
   6488 	 * descriptor rings.
   6489 	 */
   6490 	if (sc->sc_type < WM_T_82543) {
   6491 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6492 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6493 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6494 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6495 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6496 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6497 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6498 
   6499 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6500 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6501 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6502 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6503 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6504 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6505 	} else {
   6506 		int qid = wmq->wmq_id;
   6507 
   6508 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6509 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6510 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6511 
   6512 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6513 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6514 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6515 
   6516 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6517 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6518 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
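			/*
			 * E.g. with standard 2 KB mbuf clusters (MCLBYTES =
			 * 2048) and the presumed 1 KB granularity of the
			 * BSIZEPKT field, the value written above is 2.
			 */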
   6519 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6520 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6521 			    | RXDCTL_WTHRESH(1));
   6522 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6523 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6524 		} else {
   6525 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6526 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6527 			/* XXX should update with AIM? */
   6528 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6529 			/* MUST hold the same value as RDTR */
   6530 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6531 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6532 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6533 		}
   6534 	}
   6535 }
   6536 
   6537 static int
   6538 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6539 {
   6540 	struct wm_rxsoft *rxs;
   6541 	int error, i;
   6542 
   6543 	KASSERT(mutex_owned(rxq->rxq_lock));
   6544 
   6545 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6546 		rxs = &rxq->rxq_soft[i];
   6547 		if (rxs->rxs_mbuf == NULL) {
   6548 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6549 				log(LOG_ERR, "%s: unable to allocate or map "
   6550 				    "rx buffer %d, error = %d\n",
   6551 				    device_xname(sc->sc_dev), i, error);
   6552 				/*
   6553 				 * XXX Should attempt to run with fewer receive
   6554 				 * XXX buffers instead of just failing.
   6555 				 */
   6556 				wm_rxdrain(rxq);
   6557 				return ENOMEM;
   6558 			}
   6559 		} else {
   6560 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6561 				wm_init_rxdesc(rxq, i);
   6562 			/*
   6563 			 * For the 82575 and newer devices, the Rx descriptors
   6564 			 * must be initialized after RCTL.EN is set in
   6565 			 * wm_set_filter().
   6566 			 */
   6567 		}
   6568 	}
   6569 	rxq->rxq_ptr = 0;
   6570 	rxq->rxq_discard = 0;
   6571 	WM_RXCHAIN_RESET(rxq);
   6572 
   6573 	return 0;
   6574 }
   6575 
   6576 static int
   6577 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6578     struct wm_rxqueue *rxq)
   6579 {
   6580 
   6581 	KASSERT(mutex_owned(rxq->rxq_lock));
   6582 
   6583 	/*
   6584 	 * Set up some register offsets that are different between
   6585 	 * the i82542 and the i82543 and later chips.
   6586 	 */
   6587 	if (sc->sc_type < WM_T_82543)
   6588 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6589 	else
   6590 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6591 
   6592 	wm_init_rx_regs(sc, wmq, rxq);
   6593 	return wm_init_rx_buffer(sc, rxq);
   6594 }
   6595 
   6596 /*
   6597  * wm_init_txrx_queues:
   6598  *	Initialize {Tx,Rx} descriptors and {Tx,Rx} buffers.
   6599  */
   6600 static int
   6601 wm_init_txrx_queues(struct wm_softc *sc)
   6602 {
   6603 	int i, error = 0;
   6604 
   6605 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6606 		device_xname(sc->sc_dev), __func__));
   6607 
   6608 	for (i = 0; i < sc->sc_nqueues; i++) {
   6609 		struct wm_queue *wmq = &sc->sc_queue[i];
   6610 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6611 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6612 
   6613 		/*
   6614 		 * TODO
   6615 		 * Currently, a constant value is used instead of AIM
   6616 		 * (Adaptive Interrupt Moderation), and the interrupt
   6617 		 * interval used in multiqueue (polling) mode is lower than
   6618 		 * the default value.  More tuning, and AIM, are required.
   6619 		 */
   6620 		if (wm_is_using_multiqueue(sc))
   6621 			wmq->wmq_itr = 50;
   6622 		else
   6623 			wmq->wmq_itr = sc->sc_itr_init;
   6624 		wmq->wmq_set_itr = true;
   6625 
   6626 		mutex_enter(txq->txq_lock);
   6627 		wm_init_tx_queue(sc, wmq, txq);
   6628 		mutex_exit(txq->txq_lock);
   6629 
   6630 		mutex_enter(rxq->rxq_lock);
   6631 		error = wm_init_rx_queue(sc, wmq, rxq);
   6632 		mutex_exit(rxq->rxq_lock);
   6633 		if (error)
   6634 			break;
   6635 	}
   6636 
   6637 	return error;
   6638 }
   6639 
   6640 /*
   6641  * wm_tx_offload:
   6642  *
   6643  *	Set up TCP/IP checksumming parameters for the
   6644  *	specified packet.
   6645  */
   6646 static int
   6647 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6648     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6649 {
   6650 	struct mbuf *m0 = txs->txs_mbuf;
   6651 	struct livengood_tcpip_ctxdesc *t;
   6652 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6653 	uint32_t ipcse;
   6654 	struct ether_header *eh;
   6655 	int offset, iphl;
   6656 	uint8_t fields;
   6657 
   6658 	/*
   6659 	 * XXX It would be nice if the mbuf pkthdr had offset
   6660 	 * fields for the protocol headers.
   6661 	 */
   6662 
   6663 	eh = mtod(m0, struct ether_header *);
   6664 	switch (htons(eh->ether_type)) {
   6665 	case ETHERTYPE_IP:
   6666 	case ETHERTYPE_IPV6:
   6667 		offset = ETHER_HDR_LEN;
   6668 		break;
   6669 
   6670 	case ETHERTYPE_VLAN:
   6671 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6672 		break;
   6673 
   6674 	default:
   6675 		/*
   6676 		 * Don't support this protocol or encapsulation.
   6677 		 */
   6678 		*fieldsp = 0;
   6679 		*cmdp = 0;
   6680 		return 0;
   6681 	}
   6682 
   6683 	if ((m0->m_pkthdr.csum_flags &
   6684 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6685 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6686 	} else {
   6687 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6688 	}
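	/*
	 * Example: for an untagged IPv4 packet with a 20-byte IP header,
	 * offset is ETHER_HDR_LEN (14) and iphl is 20, so ipcse below is
	 * 33, the offset of the last byte of the IP header.
	 */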
   6689 	ipcse = offset + iphl - 1;
   6690 
   6691 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6692 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6693 	seg = 0;
   6694 	fields = 0;
   6695 
   6696 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6697 		int hlen = offset + iphl;
   6698 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6699 
   6700 		if (__predict_false(m0->m_len <
   6701 				    (hlen + sizeof(struct tcphdr)))) {
   6702 			/*
   6703 			 * TCP/IP headers are not in the first mbuf; we need
   6704 			 * to do this the slow and painful way.  Let's just
   6705 			 * hope this doesn't happen very often.
   6706 			 */
   6707 			struct tcphdr th;
   6708 
   6709 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6710 
   6711 			m_copydata(m0, hlen, sizeof(th), &th);
   6712 			if (v4) {
   6713 				struct ip ip;
   6714 
   6715 				m_copydata(m0, offset, sizeof(ip), &ip);
   6716 				ip.ip_len = 0;
   6717 				m_copyback(m0,
   6718 				    offset + offsetof(struct ip, ip_len),
   6719 				    sizeof(ip.ip_len), &ip.ip_len);
   6720 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6721 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6722 			} else {
   6723 				struct ip6_hdr ip6;
   6724 
   6725 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6726 				ip6.ip6_plen = 0;
   6727 				m_copyback(m0,
   6728 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6729 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6730 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6731 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6732 			}
   6733 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6734 			    sizeof(th.th_sum), &th.th_sum);
   6735 
   6736 			hlen += th.th_off << 2;
   6737 		} else {
   6738 			/*
   6739 			 * TCP/IP headers are in the first mbuf; we can do
   6740 			 * this the easy way.
   6741 			 */
   6742 			struct tcphdr *th;
   6743 
   6744 			if (v4) {
   6745 				struct ip *ip =
   6746 				    (void *)(mtod(m0, char *) + offset);
   6747 				th = (void *)(mtod(m0, char *) + hlen);
   6748 
   6749 				ip->ip_len = 0;
   6750 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6751 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6752 			} else {
   6753 				struct ip6_hdr *ip6 =
   6754 				    (void *)(mtod(m0, char *) + offset);
   6755 				th = (void *)(mtod(m0, char *) + hlen);
   6756 
   6757 				ip6->ip6_plen = 0;
   6758 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6759 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6760 			}
   6761 			hlen += th->th_off << 2;
   6762 		}
   6763 
   6764 		if (v4) {
   6765 			WM_Q_EVCNT_INCR(txq, txtso);
   6766 			cmdlen |= WTX_TCPIP_CMD_IP;
   6767 		} else {
   6768 			WM_Q_EVCNT_INCR(txq, txtso6);
   6769 			ipcse = 0;
   6770 		}
   6771 		cmd |= WTX_TCPIP_CMD_TSE;
   6772 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6773 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6774 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6775 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6776 	}
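
	/*
	 * Illustrative TSO example: with 14-byte Ethernet, 20-byte IPv4 and
	 * 20-byte TCP headers, hlen is 54; given an MSS of 1448 the
	 * controller replicates that 54-byte header for each segment and
	 * slices the remaining payload into 1448-byte chunks.
	 */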
   6777 
   6778 	/*
   6779 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6780 	 * offload feature, if we load the context descriptor, we
   6781 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6782 	 */
   6783 
   6784 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6785 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6786 	    WTX_TCPIP_IPCSE(ipcse);
   6787 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6788 		WM_Q_EVCNT_INCR(txq, txipsum);
   6789 		fields |= WTX_IXSM;
   6790 	}
   6791 
   6792 	offset += iphl;
   6793 
   6794 	if (m0->m_pkthdr.csum_flags &
   6795 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6796 		WM_Q_EVCNT_INCR(txq, txtusum);
   6797 		fields |= WTX_TXSM;
   6798 		tucs = WTX_TCPIP_TUCSS(offset) |
   6799 		    WTX_TCPIP_TUCSO(offset +
   6800 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6801 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6802 	} else if ((m0->m_pkthdr.csum_flags &
   6803 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6804 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6805 		fields |= WTX_TXSM;
   6806 		tucs = WTX_TCPIP_TUCSS(offset) |
   6807 		    WTX_TCPIP_TUCSO(offset +
   6808 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6809 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6810 	} else {
   6811 		/* Just initialize it to a valid TCP context. */
   6812 		tucs = WTX_TCPIP_TUCSS(offset) |
   6813 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6814 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6815 	}
   6816 
   6817 	/*
   6818 	 * We don't have to write a context descriptor for every packet,
   6819 	 * except on the 82574: when two descriptor queues are in use,
   6820 	 * the 82574 requires a context descriptor for every packet.
   6821 	 * Writing a context descriptor for every packet adds overhead,
   6822 	 * but it does not cause problems.
   6823 	 */
   6824 	/* Fill in the context descriptor. */
   6825 	t = (struct livengood_tcpip_ctxdesc *)
   6826 	    &txq->txq_descs[txq->txq_next];
   6827 	t->tcpip_ipcs = htole32(ipcs);
   6828 	t->tcpip_tucs = htole32(tucs);
   6829 	t->tcpip_cmdlen = htole32(cmdlen);
   6830 	t->tcpip_seg = htole32(seg);
   6831 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6832 
   6833 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6834 	txs->txs_ndesc++;
   6835 
   6836 	*cmdp = cmd;
   6837 	*fieldsp = fields;
   6838 
   6839 	return 0;
   6840 }
   6841 
   6842 static inline int
   6843 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6844 {
   6845 	struct wm_softc *sc = ifp->if_softc;
   6846 	u_int cpuid = cpu_index(curcpu());
   6847 
   6848 	/*
   6849 	 * Currently, a simple distribution strategy.
   6850 	 * TODO:
   6851 	 * Distribute by flowid (i.e. the RSS hash value).
   6852 	 */
   6853 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6854 }
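
/*
 * Worked example of the mapping above: with ncpu = 4,
 * sc_affinity_offset = 1 and sc_nqueues = 2, CPU index 1 maps to queue 0,
 * CPU 2 to queue 1, CPU 3 to queue 0 and CPU 0 to queue 1, so traffic
 * from all CPUs is spread across both queues.
 */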
   6855 
   6856 /*
   6857  * wm_start:		[ifnet interface function]
   6858  *
   6859  *	Start packet transmission on the interface.
   6860  */
   6861 static void
   6862 wm_start(struct ifnet *ifp)
   6863 {
   6864 	struct wm_softc *sc = ifp->if_softc;
   6865 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6866 
   6867 #ifdef WM_MPSAFE
   6868 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6869 #endif
   6870 	/*
   6871 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6872 	 */
   6873 
   6874 	mutex_enter(txq->txq_lock);
   6875 	if (!txq->txq_stopping)
   6876 		wm_start_locked(ifp);
   6877 	mutex_exit(txq->txq_lock);
   6878 }
   6879 
   6880 static void
   6881 wm_start_locked(struct ifnet *ifp)
   6882 {
   6883 	struct wm_softc *sc = ifp->if_softc;
   6884 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6885 
   6886 	wm_send_common_locked(ifp, txq, false);
   6887 }
   6888 
   6889 static int
   6890 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6891 {
   6892 	int qid;
   6893 	struct wm_softc *sc = ifp->if_softc;
   6894 	struct wm_txqueue *txq;
   6895 
   6896 	qid = wm_select_txqueue(ifp, m);
   6897 	txq = &sc->sc_queue[qid].wmq_txq;
   6898 
   6899 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6900 		m_freem(m);
   6901 		WM_Q_EVCNT_INCR(txq, txdrop);
   6902 		return ENOBUFS;
   6903 	}
   6904 
   6905 	/*
   6906 	 * XXX NOMPSAFE: ifp->if_data should be percpu.
   6907 	 */
   6908 	ifp->if_obytes += m->m_pkthdr.len;
   6909 	if (m->m_flags & M_MCAST)
   6910 		ifp->if_omcasts++;
   6911 
   6912 	if (mutex_tryenter(txq->txq_lock)) {
   6913 		if (!txq->txq_stopping)
   6914 			wm_transmit_locked(ifp, txq);
   6915 		mutex_exit(txq->txq_lock);
   6916 	}
   6917 
   6918 	return 0;
   6919 }
   6920 
   6921 static void
   6922 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6923 {
   6924 
   6925 	wm_send_common_locked(ifp, txq, true);
   6926 }
   6927 
   6928 static void
   6929 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6930     bool is_transmit)
   6931 {
   6932 	struct wm_softc *sc = ifp->if_softc;
   6933 	struct mbuf *m0;
   6934 	struct m_tag *mtag;
   6935 	struct wm_txsoft *txs;
   6936 	bus_dmamap_t dmamap;
   6937 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6938 	bus_addr_t curaddr;
   6939 	bus_size_t seglen, curlen;
   6940 	uint32_t cksumcmd;
   6941 	uint8_t cksumfields;
   6942 
   6943 	KASSERT(mutex_owned(txq->txq_lock));
   6944 
   6945 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6946 		return;
   6947 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6948 		return;
   6949 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6950 		return;
   6951 
   6952 	/* Remember the previous number of free descriptors. */
   6953 	ofree = txq->txq_free;
   6954 
   6955 	/*
   6956 	 * Loop through the send queue, setting up transmit descriptors
   6957 	 * until we drain the queue, or use up all available transmit
   6958 	 * descriptors.
   6959 	 */
   6960 	for (;;) {
   6961 		m0 = NULL;
   6962 
   6963 		/* Get a work queue entry. */
   6964 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6965 			wm_txeof(sc, txq);
   6966 			if (txq->txq_sfree == 0) {
   6967 				DPRINTF(WM_DEBUG_TX,
   6968 				    ("%s: TX: no free job descriptors\n",
   6969 					device_xname(sc->sc_dev)));
   6970 				WM_Q_EVCNT_INCR(txq, txsstall);
   6971 				break;
   6972 			}
   6973 		}
   6974 
   6975 		/* Grab a packet off the queue. */
   6976 		if (is_transmit)
   6977 			m0 = pcq_get(txq->txq_interq);
   6978 		else
   6979 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6980 		if (m0 == NULL)
   6981 			break;
   6982 
   6983 		DPRINTF(WM_DEBUG_TX,
   6984 		    ("%s: TX: have packet to transmit: %p\n",
   6985 		    device_xname(sc->sc_dev), m0));
   6986 
   6987 		txs = &txq->txq_soft[txq->txq_snext];
   6988 		dmamap = txs->txs_dmamap;
   6989 
   6990 		use_tso = (m0->m_pkthdr.csum_flags &
   6991 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6992 
   6993 		/*
   6994 		 * So says the Linux driver:
   6995 		 * The controller does a simple calculation to make sure
   6996 		 * there is enough room in the FIFO before initiating the
   6997 		 * DMA for each buffer.  The calc is:
   6998 		 *	4 = ceil(buffer len / MSS)
   6999 		 * To make sure we don't overrun the FIFO, adjust the max
   7000 		 * buffer len if the MSS drops.
   7001 		 */
   7002 		dmamap->dm_maxsegsz =
   7003 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7004 		    ? m0->m_pkthdr.segsz << 2
   7005 		    : WTX_MAX_LEN;
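		/*
		 * Worked example: with TSO and an MSS of 1448, the clamp
		 * above limits each DMA segment to 1448 << 2 = 5792 bytes,
		 * i.e. at most four MSS-sized chunks per buffer, matching
		 * the FIFO calculation quoted above.
		 */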
   7006 
   7007 		/*
   7008 		 * Load the DMA map.  If this fails, the packet either
   7009 		 * didn't fit in the allotted number of segments, or we
   7010 		 * were short on resources.  For the too-many-segments
   7011 		 * case, we simply report an error and drop the packet,
   7012 		 * since we can't sanely copy a jumbo packet to a single
   7013 		 * buffer.
   7014 		 */
   7015 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7016 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7017 		if (error) {
   7018 			if (error == EFBIG) {
   7019 				WM_Q_EVCNT_INCR(txq, txdrop);
   7020 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7021 				    "DMA segments, dropping...\n",
   7022 				    device_xname(sc->sc_dev));
   7023 				wm_dump_mbuf_chain(sc, m0);
   7024 				m_freem(m0);
   7025 				continue;
   7026 			}
   7027 			/* Short on resources, just stop for now. */
   7028 			DPRINTF(WM_DEBUG_TX,
   7029 			    ("%s: TX: dmamap load failed: %d\n",
   7030 			    device_xname(sc->sc_dev), error));
   7031 			break;
   7032 		}
   7033 
   7034 		segs_needed = dmamap->dm_nsegs;
   7035 		if (use_tso) {
   7036 			/* For sentinel descriptor; see below. */
   7037 			segs_needed++;
   7038 		}
   7039 
   7040 		/*
   7041 		 * Ensure we have enough descriptors free to describe
   7042 		 * the packet.  Note, we always reserve one descriptor
   7043 		 * at the end of the ring due to the semantics of the
   7044 		 * TDT register, plus one more in the event we need
   7045 		 * to load offload context.
   7046 		 */
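		/* I.e. the packet fits only if txq_free >= segs_needed + 2. */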
   7047 		if (segs_needed > txq->txq_free - 2) {
   7048 			/*
   7049 			 * Not enough free descriptors to transmit this
   7050 			 * packet.  We haven't committed anything yet,
   7051 			 * so just unload the DMA map, put the packet
   7052 			 * back on the queue, and punt.  Notify the upper
   7053 			 * layer that there are no more slots left.
   7054 			 */
   7055 			DPRINTF(WM_DEBUG_TX,
   7056 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7057 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7058 			    segs_needed, txq->txq_free - 1));
   7059 			if (!is_transmit)
   7060 				ifp->if_flags |= IFF_OACTIVE;
   7061 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7062 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7063 			WM_Q_EVCNT_INCR(txq, txdstall);
   7064 			break;
   7065 		}
   7066 
   7067 		/*
   7068 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7069 		 * once we know we can transmit the packet, since we
   7070 		 * do some internal FIFO space accounting here.
   7071 		 */
   7072 		if (sc->sc_type == WM_T_82547 &&
   7073 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7074 			DPRINTF(WM_DEBUG_TX,
   7075 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7076 			    device_xname(sc->sc_dev)));
   7077 			if (!is_transmit)
   7078 				ifp->if_flags |= IFF_OACTIVE;
   7079 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7080 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7081 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7082 			break;
   7083 		}
   7084 
   7085 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7086 
   7087 		DPRINTF(WM_DEBUG_TX,
   7088 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7089 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7090 
   7091 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7092 
   7093 		/*
   7094 		 * Store a pointer to the packet so that we can free it
   7095 		 * later.
   7096 		 *
   7097 		 * Initially, we consider the number of descriptors the
   7098 		 * packet uses to be the number of DMA segments.  This may
   7099 		 * be incremented by 1 if we do checksum offload (a
   7100 		 * descriptor is used to set the checksum context).
   7101 		 */
   7102 		txs->txs_mbuf = m0;
   7103 		txs->txs_firstdesc = txq->txq_next;
   7104 		txs->txs_ndesc = segs_needed;
   7105 
   7106 		/* Set up offload parameters for this packet. */
   7107 		if (m0->m_pkthdr.csum_flags &
   7108 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7109 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7110 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7111 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7112 					  &cksumfields) != 0) {
   7113 				/* Error message already displayed. */
   7114 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7115 				continue;
   7116 			}
   7117 		} else {
   7118 			cksumcmd = 0;
   7119 			cksumfields = 0;
   7120 		}
   7121 
   7122 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7123 
   7124 		/* Sync the DMA map. */
   7125 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7126 		    BUS_DMASYNC_PREWRITE);
   7127 
   7128 		/* Initialize the transmit descriptor. */
   7129 		for (nexttx = txq->txq_next, seg = 0;
   7130 		     seg < dmamap->dm_nsegs; seg++) {
   7131 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7132 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7133 			     seglen != 0;
   7134 			     curaddr += curlen, seglen -= curlen,
   7135 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7136 				curlen = seglen;
   7137 
   7138 				/*
   7139 				 * So says the Linux driver:
   7140 				 * Work around for premature descriptor
   7141 				 * write-backs in TSO mode.  Append a
   7142 				 * 4-byte sentinel descriptor.
   7143 				 */
   7144 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7145 				    curlen > 8)
   7146 					curlen -= 4;
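				/*
				 * E.g. a final 1024-byte chunk is emitted
				 * as 1020 bytes here; the loop then runs
				 * once more and the remaining 4 bytes
				 * become the sentinel descriptor.
				 */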
   7147 
   7148 				wm_set_dma_addr(
   7149 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7150 				txq->txq_descs[nexttx].wtx_cmdlen
   7151 				    = htole32(cksumcmd | curlen);
   7152 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7153 				    = 0;
   7154 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7155 				    = cksumfields;
   7156 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7157 				lasttx = nexttx;
   7158 
   7159 				DPRINTF(WM_DEBUG_TX,
   7160 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7161 				     "len %#04zx\n",
   7162 				    device_xname(sc->sc_dev), nexttx,
   7163 				    (uint64_t)curaddr, curlen));
   7164 			}
   7165 		}
   7166 
   7167 		KASSERT(lasttx != -1);
   7168 
   7169 		/*
   7170 		 * Set up the command byte on the last descriptor of
   7171 		 * the packet.  If we're in the interrupt delay window,
   7172 		 * delay the interrupt.
   7173 		 */
   7174 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7175 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7176 
   7177 		/*
   7178 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7179 		 * up the descriptor to encapsulate the packet for us.
   7180 		 *
   7181 		 * This is only valid on the last descriptor of the packet.
   7182 		 */
   7183 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7184 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7185 			    htole32(WTX_CMD_VLE);
   7186 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7187 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7188 		}
   7189 
   7190 		txs->txs_lastdesc = lasttx;
   7191 
   7192 		DPRINTF(WM_DEBUG_TX,
   7193 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7194 		    device_xname(sc->sc_dev),
   7195 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7196 
   7197 		/* Sync the descriptors we're using. */
   7198 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7199 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7200 
   7201 		/* Give the packet to the chip. */
   7202 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7203 
   7204 		DPRINTF(WM_DEBUG_TX,
   7205 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7206 
   7207 		DPRINTF(WM_DEBUG_TX,
   7208 		    ("%s: TX: finished transmitting packet, job %d\n",
   7209 		    device_xname(sc->sc_dev), txq->txq_snext));
   7210 
   7211 		/* Advance the tx pointer. */
   7212 		txq->txq_free -= txs->txs_ndesc;
   7213 		txq->txq_next = nexttx;
   7214 
   7215 		txq->txq_sfree--;
   7216 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7217 
   7218 		/* Pass the packet to any BPF listeners. */
   7219 		bpf_mtap(ifp, m0);
   7220 	}
   7221 
   7222 	if (m0 != NULL) {
   7223 		if (!is_transmit)
   7224 			ifp->if_flags |= IFF_OACTIVE;
   7225 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7226 		WM_Q_EVCNT_INCR(txq, txdrop);
   7227 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7228 			__func__));
   7229 		m_freem(m0);
   7230 	}
   7231 
   7232 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7233 		/* No more slots; notify upper layer. */
   7234 		if (!is_transmit)
   7235 			ifp->if_flags |= IFF_OACTIVE;
   7236 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7237 	}
   7238 
   7239 	if (txq->txq_free != ofree) {
   7240 		/* Set a watchdog timer in case the chip flakes out. */
   7241 		ifp->if_timer = 5;
   7242 	}
   7243 }
   7244 
   7245 /*
   7246  * wm_nq_tx_offload:
   7247  *
   7248  *	Set up TCP/IP checksumming parameters for the
   7249  *	specified packet, for NEWQUEUE devices
   7250  */
   7251 static int
   7252 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7253     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7254 {
   7255 	struct mbuf *m0 = txs->txs_mbuf;
   7256 	struct m_tag *mtag;
   7257 	uint32_t vl_len, mssidx, cmdc;
   7258 	struct ether_header *eh;
   7259 	int offset, iphl;
   7260 
   7261 	/*
   7262 	 * XXX It would be nice if the mbuf pkthdr had offset
   7263 	 * fields for the protocol headers.
   7264 	 */
   7265 	*cmdlenp = 0;
   7266 	*fieldsp = 0;
   7267 
   7268 	eh = mtod(m0, struct ether_header *);
   7269 	switch (htons(eh->ether_type)) {
   7270 	case ETHERTYPE_IP:
   7271 	case ETHERTYPE_IPV6:
   7272 		offset = ETHER_HDR_LEN;
   7273 		break;
   7274 
   7275 	case ETHERTYPE_VLAN:
   7276 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7277 		break;
   7278 
   7279 	default:
   7280 		/* Don't support this protocol or encapsulation. */
   7281 		*do_csum = false;
   7282 		return 0;
   7283 	}
   7284 	*do_csum = true;
   7285 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7286 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7287 
   7288 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7289 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7290 
   7291 	if ((m0->m_pkthdr.csum_flags &
   7292 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7293 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7294 	} else {
   7295 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7296 	}
   7297 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7298 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7299 
   7300 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7301 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7302 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7303 		*cmdlenp |= NQTX_CMD_VLE;
   7304 	}
   7305 
   7306 	mssidx = 0;
   7307 
   7308 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7309 		int hlen = offset + iphl;
   7310 		int tcp_hlen;
   7311 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7312 
   7313 		if (__predict_false(m0->m_len <
   7314 				    (hlen + sizeof(struct tcphdr)))) {
   7315 			/*
   7316 			 * TCP/IP headers are not in the first mbuf; we need
   7317 			 * to do this the slow and painful way.  Let's just
   7318 			 * hope this doesn't happen very often.
   7319 			 */
   7320 			struct tcphdr th;
   7321 
   7322 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7323 
   7324 			m_copydata(m0, hlen, sizeof(th), &th);
   7325 			if (v4) {
   7326 				struct ip ip;
   7327 
   7328 				m_copydata(m0, offset, sizeof(ip), &ip);
   7329 				ip.ip_len = 0;
   7330 				m_copyback(m0,
   7331 				    offset + offsetof(struct ip, ip_len),
   7332 				    sizeof(ip.ip_len), &ip.ip_len);
   7333 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7334 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7335 			} else {
   7336 				struct ip6_hdr ip6;
   7337 
   7338 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7339 				ip6.ip6_plen = 0;
   7340 				m_copyback(m0,
   7341 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7342 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7343 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7344 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7345 			}
   7346 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7347 			    sizeof(th.th_sum), &th.th_sum);
   7348 
   7349 			tcp_hlen = th.th_off << 2;
   7350 		} else {
   7351 			/*
   7352 			 * TCP/IP headers are in the first mbuf; we can do
   7353 			 * this the easy way.
   7354 			 */
   7355 			struct tcphdr *th;
   7356 
   7357 			if (v4) {
   7358 				struct ip *ip =
   7359 				    (void *)(mtod(m0, char *) + offset);
   7360 				th = (void *)(mtod(m0, char *) + hlen);
   7361 
   7362 				ip->ip_len = 0;
   7363 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7364 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7365 			} else {
   7366 				struct ip6_hdr *ip6 =
   7367 				    (void *)(mtod(m0, char *) + offset);
   7368 				th = (void *)(mtod(m0, char *) + hlen);
   7369 
   7370 				ip6->ip6_plen = 0;
   7371 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7372 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7373 			}
   7374 			tcp_hlen = th->th_off << 2;
   7375 		}
   7376 		hlen += tcp_hlen;
   7377 		*cmdlenp |= NQTX_CMD_TSE;
   7378 
   7379 		if (v4) {
   7380 			WM_Q_EVCNT_INCR(txq, txtso);
   7381 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7382 		} else {
   7383 			WM_Q_EVCNT_INCR(txq, txtso6);
   7384 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7385 		}
   7386 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7387 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7388 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7389 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7390 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7391 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7392 	} else {
   7393 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7394 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7395 	}
   7396 
   7397 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7398 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7399 		cmdc |= NQTXC_CMD_IP4;
   7400 	}
   7401 
   7402 	if (m0->m_pkthdr.csum_flags &
   7403 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7404 		WM_Q_EVCNT_INCR(txq, txtusum);
   7405 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7406 			cmdc |= NQTXC_CMD_TCP;
   7407 		} else {
   7408 			cmdc |= NQTXC_CMD_UDP;
   7409 		}
   7410 		cmdc |= NQTXC_CMD_IP4;
   7411 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7412 	}
   7413 	if (m0->m_pkthdr.csum_flags &
   7414 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7415 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7416 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7417 			cmdc |= NQTXC_CMD_TCP;
   7418 		} else {
   7419 			cmdc |= NQTXC_CMD_UDP;
   7420 		}
   7421 		cmdc |= NQTXC_CMD_IP6;
   7422 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7423 	}
   7424 
   7425 	/*
   7426 	 * We don't have to write a context descriptor for every packet on
   7427 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
   7428 	 * I354, I210 and I211.  For these controllers it is enough to
   7429 	 * write one per Tx queue.
   7430 	 * Writing a context descriptor for every packet adds overhead,
   7431 	 * but it does not cause problems.
   7432 	 */
   7433 	/* Fill in the context descriptor. */
   7434 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7435 	    htole32(vl_len);
   7436 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7437 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7438 	    htole32(cmdc);
   7439 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7440 	    htole32(mssidx);
   7441 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7442 	DPRINTF(WM_DEBUG_TX,
   7443 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7444 	    txq->txq_next, 0, vl_len));
   7445 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7446 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7447 	txs->txs_ndesc++;
   7448 	return 0;
   7449 }
   7450 
   7451 /*
   7452  * wm_nq_start:		[ifnet interface function]
   7453  *
   7454  *	Start packet transmission on the interface for NEWQUEUE devices
   7455  */
   7456 static void
   7457 wm_nq_start(struct ifnet *ifp)
   7458 {
   7459 	struct wm_softc *sc = ifp->if_softc;
   7460 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7461 
   7462 #ifdef WM_MPSAFE
   7463 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7464 #endif
   7465 	/*
   7466 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7467 	 */
   7468 
   7469 	mutex_enter(txq->txq_lock);
   7470 	if (!txq->txq_stopping)
   7471 		wm_nq_start_locked(ifp);
   7472 	mutex_exit(txq->txq_lock);
   7473 }
   7474 
   7475 static void
   7476 wm_nq_start_locked(struct ifnet *ifp)
   7477 {
   7478 	struct wm_softc *sc = ifp->if_softc;
   7479 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7480 
   7481 	wm_nq_send_common_locked(ifp, txq, false);
   7482 }
   7483 
   7484 static int
   7485 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7486 {
   7487 	int qid;
   7488 	struct wm_softc *sc = ifp->if_softc;
   7489 	struct wm_txqueue *txq;
   7490 
   7491 	qid = wm_select_txqueue(ifp, m);
   7492 	txq = &sc->sc_queue[qid].wmq_txq;
   7493 
   7494 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7495 		m_freem(m);
   7496 		WM_Q_EVCNT_INCR(txq, txdrop);
   7497 		return ENOBUFS;
   7498 	}
   7499 
   7500 	/*
   7501 	 * XXX NOMPSAFE: ifp->if_data should be percpu.
   7502 	 */
   7503 	ifp->if_obytes += m->m_pkthdr.len;
   7504 	if (m->m_flags & M_MCAST)
   7505 		ifp->if_omcasts++;
   7506 
   7507 	/*
   7508 	 * There are two situations in which this mutex_tryenter() can
   7509 	 * fail at run time:
   7510 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
   7511 	 *     (2) contention with the deferred if_start softint
   7512 	 *         (wm_handle_queue())
   7513 	 * In both cases, the last packet enqueued to txq->txq_interq is
   7514 	 * eventually dequeued by wm_deferred_start_locked(), so it does
   7515 	 * not get stuck.
   7516 	 */
   7517 	if (mutex_tryenter(txq->txq_lock)) {
   7518 		if (!txq->txq_stopping)
   7519 			wm_nq_transmit_locked(ifp, txq);
   7520 		mutex_exit(txq->txq_lock);
   7521 	}
   7522 
   7523 	return 0;
   7524 }
   7525 
   7526 static void
   7527 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7528 {
   7529 
   7530 	wm_nq_send_common_locked(ifp, txq, true);
   7531 }
   7532 
   7533 static void
   7534 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7535     bool is_transmit)
   7536 {
   7537 	struct wm_softc *sc = ifp->if_softc;
   7538 	struct mbuf *m0;
   7539 	struct m_tag *mtag;
   7540 	struct wm_txsoft *txs;
   7541 	bus_dmamap_t dmamap;
   7542 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7543 	bool do_csum, sent;
   7544 
   7545 	KASSERT(mutex_owned(txq->txq_lock));
   7546 
   7547 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7548 		return;
   7549 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7550 		return;
   7551 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7552 		return;
   7553 
   7554 	sent = false;
   7555 
   7556 	/*
   7557 	 * Loop through the send queue, setting up transmit descriptors
   7558 	 * until we drain the queue, or use up all available transmit
   7559 	 * descriptors.
   7560 	 */
   7561 	for (;;) {
   7562 		m0 = NULL;
   7563 
   7564 		/* Get a work queue entry. */
   7565 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7566 			wm_txeof(sc, txq);
   7567 			if (txq->txq_sfree == 0) {
   7568 				DPRINTF(WM_DEBUG_TX,
   7569 				    ("%s: TX: no free job descriptors\n",
   7570 					device_xname(sc->sc_dev)));
   7571 				WM_Q_EVCNT_INCR(txq, txsstall);
   7572 				break;
   7573 			}
   7574 		}
   7575 
   7576 		/* Grab a packet off the queue. */
   7577 		if (is_transmit)
   7578 			m0 = pcq_get(txq->txq_interq);
   7579 		else
   7580 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7581 		if (m0 == NULL)
   7582 			break;
   7583 
   7584 		DPRINTF(WM_DEBUG_TX,
   7585 		    ("%s: TX: have packet to transmit: %p\n",
   7586 		    device_xname(sc->sc_dev), m0));
   7587 
   7588 		txs = &txq->txq_soft[txq->txq_snext];
   7589 		dmamap = txs->txs_dmamap;
   7590 
   7591 		/*
   7592 		 * Load the DMA map.  If this fails, the packet either
   7593 		 * didn't fit in the allotted number of segments, or we
   7594 		 * were short on resources.  For the too-many-segments
   7595 		 * case, we simply report an error and drop the packet,
   7596 		 * since we can't sanely copy a jumbo packet to a single
   7597 		 * buffer.
   7598 		 */
   7599 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7600 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7601 		if (error) {
   7602 			if (error == EFBIG) {
   7603 				WM_Q_EVCNT_INCR(txq, txdrop);
   7604 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7605 				    "DMA segments, dropping...\n",
   7606 				    device_xname(sc->sc_dev));
   7607 				wm_dump_mbuf_chain(sc, m0);
   7608 				m_freem(m0);
   7609 				continue;
   7610 			}
   7611 			/* Short on resources, just stop for now. */
   7612 			DPRINTF(WM_DEBUG_TX,
   7613 			    ("%s: TX: dmamap load failed: %d\n",
   7614 			    device_xname(sc->sc_dev), error));
   7615 			break;
   7616 		}
   7617 
   7618 		segs_needed = dmamap->dm_nsegs;
   7619 
   7620 		/*
   7621 		 * Ensure we have enough descriptors free to describe
   7622 		 * the packet.  Note, we always reserve one descriptor
   7623 		 * at the end of the ring due to the semantics of the
   7624 		 * TDT register, plus one more in the event we need
   7625 		 * to load offload context.
   7626 		 */
   7627 		if (segs_needed > txq->txq_free - 2) {
   7628 			/*
   7629 			 * Not enough free descriptors to transmit this
   7630 			 * packet.  We haven't committed anything yet,
   7631 			 * so just unload the DMA map, put the packet
   7632 			 * back on the queue, and punt.  Notify the upper
   7633 			 * layer that there are no more slots left.
   7634 			 */
   7635 			DPRINTF(WM_DEBUG_TX,
   7636 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7637 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7638 			    segs_needed, txq->txq_free - 1));
   7639 			if (!is_transmit)
   7640 				ifp->if_flags |= IFF_OACTIVE;
   7641 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7642 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7643 			WM_Q_EVCNT_INCR(txq, txdstall);
   7644 			break;
   7645 		}
   7646 
   7647 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7648 
   7649 		DPRINTF(WM_DEBUG_TX,
   7650 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7651 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7652 
   7653 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7654 
   7655 		/*
   7656 		 * Store a pointer to the packet so that we can free it
   7657 		 * later.
   7658 		 *
   7659 		 * Initially, we consider the number of descriptors the
   7660 		 * packet uses to be the number of DMA segments.  This may
   7661 		 * be incremented by 1 if we do checksum offload (a
   7662 		 * descriptor is used to set the checksum context).
   7663 		 */
   7664 		txs->txs_mbuf = m0;
   7665 		txs->txs_firstdesc = txq->txq_next;
   7666 		txs->txs_ndesc = segs_needed;
   7667 
   7668 		/* Set up offload parameters for this packet. */
   7669 		uint32_t cmdlen, fields, dcmdlen;
   7670 		if (m0->m_pkthdr.csum_flags &
   7671 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7672 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7673 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7674 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7675 			    &do_csum) != 0) {
   7676 				/* Error message already displayed. */
   7677 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7678 				continue;
   7679 			}
   7680 		} else {
   7681 			do_csum = false;
   7682 			cmdlen = 0;
   7683 			fields = 0;
   7684 		}
   7685 
   7686 		/* Sync the DMA map. */
   7687 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7688 		    BUS_DMASYNC_PREWRITE);
   7689 
   7690 		/* Initialize the first transmit descriptor. */
   7691 		nexttx = txq->txq_next;
   7692 		if (!do_csum) {
   7693 			/* setup a legacy descriptor */
   7694 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7695 			    dmamap->dm_segs[0].ds_addr);
   7696 			txq->txq_descs[nexttx].wtx_cmdlen =
   7697 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7698 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7699 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7700 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7701 			    NULL) {
   7702 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7703 				    htole32(WTX_CMD_VLE);
   7704 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7705 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7706 			} else {
   7707 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7708 			}
   7709 			dcmdlen = 0;
   7710 		} else {
   7711 			/* setup an advanced data descriptor */
   7712 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7713 			    htole64(dmamap->dm_segs[0].ds_addr);
   7714 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7715 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7716 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7717 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7718 			    htole32(fields);
   7719 			DPRINTF(WM_DEBUG_TX,
   7720 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7721 			    device_xname(sc->sc_dev), nexttx,
   7722 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7723 			DPRINTF(WM_DEBUG_TX,
   7724 			    ("\t 0x%08x%08x\n", fields,
   7725 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7726 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7727 		}
   7728 
   7729 		lasttx = nexttx;
   7730 		nexttx = WM_NEXTTX(txq, nexttx);
   7731 		/*
   7732 		 * Fill in the next descriptors.  The legacy and advanced
   7733 		 * formats are the same from here on.
   7734 		 */
   7735 		for (seg = 1; seg < dmamap->dm_nsegs;
   7736 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7737 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7738 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7739 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7740 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7741 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7742 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7743 			lasttx = nexttx;
   7744 
   7745 			DPRINTF(WM_DEBUG_TX,
   7746 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7747 			     "len %#04zx\n",
   7748 			    device_xname(sc->sc_dev), nexttx,
   7749 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7750 			    dmamap->dm_segs[seg].ds_len));
   7751 		}
   7752 
   7753 		KASSERT(lasttx != -1);
   7754 
   7755 		/*
   7756 		 * Set up the command byte on the last descriptor of
   7757 		 * the packet.  If we're in the interrupt delay window,
   7758 		 * delay the interrupt.
   7759 		 */
   7760 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7761 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7762 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7763 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7764 
   7765 		txs->txs_lastdesc = lasttx;
   7766 
   7767 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7768 		    device_xname(sc->sc_dev),
   7769 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7770 
   7771 		/* Sync the descriptors we're using. */
   7772 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7773 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7774 
   7775 		/* Give the packet to the chip. */
   7776 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7777 		sent = true;
   7778 
   7779 		DPRINTF(WM_DEBUG_TX,
   7780 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7781 
   7782 		DPRINTF(WM_DEBUG_TX,
   7783 		    ("%s: TX: finished transmitting packet, job %d\n",
   7784 		    device_xname(sc->sc_dev), txq->txq_snext));
   7785 
   7786 		/* Advance the tx pointer. */
   7787 		txq->txq_free -= txs->txs_ndesc;
   7788 		txq->txq_next = nexttx;
   7789 
   7790 		txq->txq_sfree--;
   7791 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7792 
   7793 		/* Pass the packet to any BPF listeners. */
   7794 		bpf_mtap(ifp, m0);
   7795 	}
   7796 
   7797 	if (m0 != NULL) {
   7798 		if (!is_transmit)
   7799 			ifp->if_flags |= IFF_OACTIVE;
   7800 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7801 		WM_Q_EVCNT_INCR(txq, txdrop);
   7802 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7803 			__func__));
   7804 		m_freem(m0);
   7805 	}
   7806 
   7807 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7808 		/* No more slots; notify upper layer. */
   7809 		if (!is_transmit)
   7810 			ifp->if_flags |= IFF_OACTIVE;
   7811 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7812 	}
   7813 
   7814 	if (sent) {
   7815 		/* Set a watchdog timer in case the chip flakes out. */
   7816 		ifp->if_timer = 5;
   7817 	}
   7818 }
   7819 
   7820 static void
   7821 wm_deferred_start_locked(struct wm_txqueue *txq)
   7822 {
   7823 	struct wm_softc *sc = txq->txq_sc;
   7824 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7825 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7826 	int qid = wmq->wmq_id;
   7827 
   7828 	KASSERT(mutex_owned(txq->txq_lock));
   7829 
   7830 	/* Don't drop txq_lock here; the caller holds it and releases it. */
   7831 	if (txq->txq_stopping)
   7832 		return;
   7834 
   7835 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7836 		/* XXX needed for ALTQ or single-CPU systems */
   7837 		if (qid == 0)
   7838 			wm_nq_start_locked(ifp);
   7839 		wm_nq_transmit_locked(ifp, txq);
   7840 	} else {
   7841 		/* XXX needed for ALTQ or single-CPU systems */
   7842 		if (qid == 0)
   7843 			wm_start_locked(ifp);
   7844 		wm_transmit_locked(ifp, txq);
   7845 	}
   7846 }
   7847 
   7848 /* Interrupt */
   7849 
   7850 /*
   7851  * wm_txeof:
   7852  *
   7853  *	Helper; handle transmit interrupts.
   7854  */
   7855 static int
   7856 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7857 {
   7858 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7859 	struct wm_txsoft *txs;
   7860 	bool processed = false;
   7861 	int count = 0;
   7862 	int i;
   7863 	uint8_t status;
   7864 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7865 
   7866 	KASSERT(mutex_owned(txq->txq_lock));
   7867 
   7868 	if (txq->txq_stopping)
   7869 		return 0;
   7870 
   7871 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7872 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7873 	if (wmq->wmq_id == 0)
   7874 		ifp->if_flags &= ~IFF_OACTIVE;
   7875 
   7876 	/*
   7877 	 * Go through the Tx list and free mbufs for those
   7878 	 * frames which have been transmitted.
   7879 	 */
   7880 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7881 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7882 		txs = &txq->txq_soft[i];
   7883 
   7884 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7885 			device_xname(sc->sc_dev), i));
   7886 
   7887 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7888 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7889 
   7890 		status =
   7891 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7892 		if ((status & WTX_ST_DD) == 0) {
   7893 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7894 			    BUS_DMASYNC_PREREAD);
   7895 			break;
   7896 		}
   7897 
   7898 		processed = true;
   7899 		count++;
   7900 		DPRINTF(WM_DEBUG_TX,
   7901 		    ("%s: TX: job %d done: descs %d..%d\n",
   7902 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7903 		    txs->txs_lastdesc));
   7904 
   7905 		/*
   7906 		 * XXX We should probably be using the statistics
   7907 		 * XXX registers, but I don't know if they exist
   7908 		 * XXX on chips before the i82544.
   7909 		 */
   7910 
   7911 #ifdef WM_EVENT_COUNTERS
   7912 		if (status & WTX_ST_TU)
   7913 			WM_Q_EVCNT_INCR(txq, tu);
   7914 #endif /* WM_EVENT_COUNTERS */
   7915 
   7916 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7917 			ifp->if_oerrors++;
   7918 			if (status & WTX_ST_LC)
   7919 				log(LOG_WARNING, "%s: late collision\n",
   7920 				    device_xname(sc->sc_dev));
   7921 			else if (status & WTX_ST_EC) {
   7922 				ifp->if_collisions += 16;
   7923 				log(LOG_WARNING, "%s: excessive collisions\n",
   7924 				    device_xname(sc->sc_dev));
   7925 			}
   7926 		} else
   7927 			ifp->if_opackets++;
   7928 
   7929 		txq->txq_packets++;
   7930 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7931 
   7932 		txq->txq_free += txs->txs_ndesc;
   7933 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7934 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7935 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7936 		m_freem(txs->txs_mbuf);
   7937 		txs->txs_mbuf = NULL;
   7938 	}
   7939 
   7940 	/* Update the dirty transmit buffer pointer. */
   7941 	txq->txq_sdirty = i;
   7942 	DPRINTF(WM_DEBUG_TX,
   7943 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7944 
   7945 	if (count != 0)
   7946 		rnd_add_uint32(&sc->rnd_source, count);
   7947 
   7948 	/*
   7949 	 * If there are no more pending transmissions, cancel the watchdog
   7950 	 * timer.
   7951 	 */
   7952 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7953 		ifp->if_timer = 0;
   7954 
   7955 	return processed;
   7956 }
   7957 
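         /*
          * The following inline accessors hide the three receive descriptor
          * layouts this driver handles: the 82574 extended format, the
          * "new queue" format (WM_F_NEWQUEUE chips), and the legacy format
          * used by everything else.
          */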
   7958 static inline uint32_t
   7959 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7960 {
   7961 	struct wm_softc *sc = rxq->rxq_sc;
   7962 
   7963 	if (sc->sc_type == WM_T_82574)
   7964 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7965 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7966 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7967 	else
   7968 		return rxq->rxq_descs[idx].wrx_status;
   7969 }
   7970 
   7971 static inline uint32_t
   7972 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7973 {
   7974 	struct wm_softc *sc = rxq->rxq_sc;
   7975 
   7976 	if (sc->sc_type == WM_T_82574)
   7977 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7978 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7979 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7980 	else
   7981 		return rxq->rxq_descs[idx].wrx_errors;
   7982 }
   7983 
   7984 static inline uint16_t
   7985 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7986 {
   7987 	struct wm_softc *sc = rxq->rxq_sc;
   7988 
   7989 	if (sc->sc_type == WM_T_82574)
   7990 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7991 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7992 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7993 	else
   7994 		return rxq->rxq_descs[idx].wrx_special;
   7995 }
   7996 
   7997 static inline int
   7998 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7999 {
   8000 	struct wm_softc *sc = rxq->rxq_sc;
   8001 
   8002 	if (sc->sc_type == WM_T_82574)
   8003 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8004 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8005 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8006 	else
   8007 		return rxq->rxq_descs[idx].wrx_len;
   8008 }
   8009 
   8010 #ifdef WM_DEBUG
   8011 static inline uint32_t
   8012 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8013 {
   8014 	struct wm_softc *sc = rxq->rxq_sc;
   8015 
   8016 	if (sc->sc_type == WM_T_82574)
   8017 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8018 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8019 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8020 	else
   8021 		return 0;
   8022 }
   8023 
   8024 static inline uint8_t
   8025 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8026 {
   8027 	struct wm_softc *sc = rxq->rxq_sc;
   8028 
   8029 	if (sc->sc_type == WM_T_82574)
   8030 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8031 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8032 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8033 	else
   8034 		return 0;
   8035 }
   8036 #endif /* WM_DEBUG */
   8037 
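         /*
          * The predicates below take one bit mask per descriptor format and
          * test the mask that matches the chip in use.
          */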
   8038 static inline bool
   8039 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8040     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8041 {
   8042 
   8043 	if (sc->sc_type == WM_T_82574)
   8044 		return (status & ext_bit) != 0;
   8045 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8046 		return (status & nq_bit) != 0;
   8047 	else
   8048 		return (status & legacy_bit) != 0;
   8049 }
   8050 
   8051 static inline bool
   8052 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8053     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8054 {
   8055 
   8056 	if (sc->sc_type == WM_T_82574)
   8057 		return (error & ext_bit) != 0;
   8058 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8059 		return (error & nq_bit) != 0;
   8060 	else
   8061 		return (error & legacy_bit) != 0;
   8062 }
   8063 
   8064 static inline bool
   8065 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8066 {
   8067 
   8068 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8069 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8070 		return true;
   8071 	else
   8072 		return false;
   8073 }
   8074 
   8075 static inline bool
   8076 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8077 {
   8078 	struct wm_softc *sc = rxq->rxq_sc;
   8079 
   8080 	/* XXXX missing error bit for newqueue? */
    8081 	if (wm_rxdesc_is_set_error(sc, errors,
    8082 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
    8083 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ |
         		EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
    8084 		NQRXC_ERROR_RXE)) {
    8085 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
         		    EXTRXC_ERROR_SE, 0))
    8086 			log(LOG_WARNING, "%s: symbol error\n",
    8087 			    device_xname(sc->sc_dev));
    8088 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
         		    EXTRXC_ERROR_SEQ, 0))
    8089 			log(LOG_WARNING, "%s: receive sequence error\n",
    8090 			    device_xname(sc->sc_dev));
    8091 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
         		    EXTRXC_ERROR_CE, 0))
    8092 			log(LOG_WARNING, "%s: CRC error\n",
    8093 			    device_xname(sc->sc_dev));
   8094 		return true;
   8095 	}
   8096 
   8097 	return false;
   8098 }
   8099 
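         /*
          * wm_rxdesc_dd:
          *
          *	Check the descriptor done (DD) bit.  If it is clear, we have
          *	caught up with the chip; re-sync the descriptor for the next
          *	poll and return false.
          */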
   8100 static inline bool
   8101 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8102 {
   8103 	struct wm_softc *sc = rxq->rxq_sc;
   8104 
   8105 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8106 		NQRXC_STATUS_DD)) {
   8107 		/* We have processed all of the receive descriptors. */
   8108 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8109 		return false;
   8110 	}
   8111 
   8112 	return true;
   8113 }
   8114 
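         /*
          * wm_rxdesc_input_vlantag:
          *
          *	If the chip stripped a VLAN tag from this packet, attach the
          *	tag to the mbuf.  Returns false if the tag could not be
          *	attached.
          */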
   8115 static inline bool
   8116 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8117     struct mbuf *m)
   8118 {
   8119 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   8120 
   8121 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8122 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8123 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   8124 	}
   8125 
   8126 	return true;
   8127 }
   8128 
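         /*
          * wm_rxdesc_ensure_checksum:
          *
          *	Translate the descriptor's checksum status and error bits into
          *	M_CSUM flags on the mbuf, unless the chip indicated that
          *	checksum offload was skipped for this packet (IXSM).
          */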
   8129 static inline void
   8130 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8131     uint32_t errors, struct mbuf *m)
   8132 {
   8133 	struct wm_softc *sc = rxq->rxq_sc;
   8134 
   8135 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8136 		if (wm_rxdesc_is_set_status(sc, status,
   8137 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8138 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8139 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8140 			if (wm_rxdesc_is_set_error(sc, errors,
   8141 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8142 				m->m_pkthdr.csum_flags |=
   8143 					M_CSUM_IPv4_BAD;
   8144 		}
   8145 		if (wm_rxdesc_is_set_status(sc, status,
   8146 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8147 			/*
   8148 			 * Note: we don't know if this was TCP or UDP,
   8149 			 * so we just set both bits, and expect the
   8150 			 * upper layers to deal.
   8151 			 */
   8152 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8153 			m->m_pkthdr.csum_flags |=
   8154 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8155 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8156 			if (wm_rxdesc_is_set_error(sc, errors,
   8157 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8158 				m->m_pkthdr.csum_flags |=
   8159 					M_CSUM_TCP_UDP_BAD;
   8160 		}
   8161 	}
   8162 }
   8163 
   8164 /*
   8165  * wm_rxeof:
   8166  *
   8167  *	Helper; handle receive interrupts.
   8168  */
   8169 static void
   8170 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8171 {
   8172 	struct wm_softc *sc = rxq->rxq_sc;
   8173 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8174 	struct wm_rxsoft *rxs;
   8175 	struct mbuf *m;
   8176 	int i, len;
   8177 	int count = 0;
   8178 	uint32_t status, errors;
   8179 	uint16_t vlantag;
   8180 
   8181 	KASSERT(mutex_owned(rxq->rxq_lock));
   8182 
   8183 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8184 		if (limit-- == 0) {
   8185 			rxq->rxq_ptr = i;
   8186 			break;
   8187 		}
   8188 
   8189 		rxs = &rxq->rxq_soft[i];
   8190 
   8191 		DPRINTF(WM_DEBUG_RX,
   8192 		    ("%s: RX: checking descriptor %d\n",
   8193 		    device_xname(sc->sc_dev), i));
    8194 		wm_cdrxsync(rxq, i,
         		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8195 
   8196 		status = wm_rxdesc_get_status(rxq, i);
   8197 		errors = wm_rxdesc_get_errors(rxq, i);
   8198 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8199 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8200 #ifdef WM_DEBUG
   8201 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8202 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8203 #endif
   8204 
   8205 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8206 			/*
    8207 			 * Update the receive pointer while still holding
    8208 			 * rxq_lock, so it stays consistent with the
    8209 			 * descriptors processed so far.
    8210 			 */
   8210 			rxq->rxq_ptr = i;
   8211 			break;
   8212 		}
   8213 
   8214 		count++;
   8215 		if (__predict_false(rxq->rxq_discard)) {
   8216 			DPRINTF(WM_DEBUG_RX,
   8217 			    ("%s: RX: discarding contents of descriptor %d\n",
   8218 			    device_xname(sc->sc_dev), i));
   8219 			wm_init_rxdesc(rxq, i);
   8220 			if (wm_rxdesc_is_eop(rxq, status)) {
   8221 				/* Reset our state. */
   8222 				DPRINTF(WM_DEBUG_RX,
   8223 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8224 				    device_xname(sc->sc_dev)));
   8225 				rxq->rxq_discard = 0;
   8226 			}
   8227 			continue;
   8228 		}
   8229 
   8230 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8231 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8232 
   8233 		m = rxs->rxs_mbuf;
   8234 
   8235 		/*
   8236 		 * Add a new receive buffer to the ring, unless of
   8237 		 * course the length is zero. Treat the latter as a
   8238 		 * failed mapping.
   8239 		 */
   8240 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8241 			/*
   8242 			 * Failed, throw away what we've done so
   8243 			 * far, and discard the rest of the packet.
   8244 			 */
   8245 			ifp->if_ierrors++;
   8246 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8247 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8248 			wm_init_rxdesc(rxq, i);
   8249 			if (!wm_rxdesc_is_eop(rxq, status))
   8250 				rxq->rxq_discard = 1;
   8251 			if (rxq->rxq_head != NULL)
   8252 				m_freem(rxq->rxq_head);
   8253 			WM_RXCHAIN_RESET(rxq);
   8254 			DPRINTF(WM_DEBUG_RX,
   8255 			    ("%s: RX: Rx buffer allocation failed, "
   8256 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8257 			    rxq->rxq_discard ? " (discard)" : ""));
   8258 			continue;
   8259 		}
   8260 
   8261 		m->m_len = len;
   8262 		rxq->rxq_len += len;
   8263 		DPRINTF(WM_DEBUG_RX,
   8264 		    ("%s: RX: buffer at %p len %d\n",
   8265 		    device_xname(sc->sc_dev), m->m_data, len));
   8266 
   8267 		/* If this is not the end of the packet, keep looking. */
   8268 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8269 			WM_RXCHAIN_LINK(rxq, m);
   8270 			DPRINTF(WM_DEBUG_RX,
   8271 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8272 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8273 			continue;
   8274 		}
   8275 
    8276 		/*
    8277 		 * Okay, we have the entire packet now.  The chip is
    8278 		 * configured to include the FCS except on I350, I354 and
    8279 		 * I21[01] (not all chips can be configured to strip it),
    8280 		 * so we need to trim it.  We may also need to adjust the
    8281 		 * length of the previous mbuf in the chain if the current
    8282 		 * mbuf is too short.  Due to an erratum, the RCTL_SECRC
    8283 		 * bit in the RCTL register is always set on I350, so we
    8284 		 * don't trim the FCS there.
    8285 		 */
   8286 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8287 		    && (sc->sc_type != WM_T_I210)
   8288 		    && (sc->sc_type != WM_T_I211)) {
   8289 			if (m->m_len < ETHER_CRC_LEN) {
   8290 				rxq->rxq_tail->m_len
   8291 				    -= (ETHER_CRC_LEN - m->m_len);
   8292 				m->m_len = 0;
   8293 			} else
   8294 				m->m_len -= ETHER_CRC_LEN;
   8295 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8296 		} else
   8297 			len = rxq->rxq_len;
   8298 
   8299 		WM_RXCHAIN_LINK(rxq, m);
   8300 
   8301 		*rxq->rxq_tailp = NULL;
   8302 		m = rxq->rxq_head;
   8303 
   8304 		WM_RXCHAIN_RESET(rxq);
   8305 
   8306 		DPRINTF(WM_DEBUG_RX,
   8307 		    ("%s: RX: have entire packet, len -> %d\n",
   8308 		    device_xname(sc->sc_dev), len));
   8309 
   8310 		/* If an error occurred, update stats and drop the packet. */
   8311 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8312 			m_freem(m);
   8313 			continue;
   8314 		}
   8315 
   8316 		/* No errors.  Receive the packet. */
   8317 		m_set_rcvif(m, ifp);
   8318 		m->m_pkthdr.len = len;
    8319 		/*
    8320 		 * TODO:
    8321 		 * save the rsshash and rsstype in this mbuf.
    8322 		 */
   8323 		DPRINTF(WM_DEBUG_RX,
   8324 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8325 			device_xname(sc->sc_dev), rsstype, rsshash));
   8326 
   8327 		/*
   8328 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8329 		 * for us.  Associate the tag with the packet.
   8330 		 */
   8331 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8332 			continue;
   8333 
   8334 		/* Set up checksum info for this packet. */
   8335 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8336 		/*
    8337 		 * Update the receive pointer while still holding rxq_lock,
    8338 		 * so it stays consistent with the descriptors processed.
    8339 		 */
   8340 		rxq->rxq_ptr = i;
   8341 		rxq->rxq_packets++;
   8342 		rxq->rxq_bytes += len;
   8343 		mutex_exit(rxq->rxq_lock);
   8344 
   8345 		/* Pass it on. */
   8346 		if_percpuq_enqueue(sc->sc_ipq, m);
   8347 
   8348 		mutex_enter(rxq->rxq_lock);
   8349 
   8350 		if (rxq->rxq_stopping)
   8351 			break;
   8352 	}
   8353 
   8354 	if (count != 0)
   8355 		rnd_add_uint32(&sc->rnd_source, count);
   8356 
   8357 	DPRINTF(WM_DEBUG_RX,
   8358 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8359 }
   8360 
   8361 /*
   8362  * wm_linkintr_gmii:
   8363  *
   8364  *	Helper; handle link interrupts for GMII.
   8365  */
   8366 static void
   8367 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8368 {
   8369 
   8370 	KASSERT(WM_CORE_LOCKED(sc));
   8371 
   8372 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8373 		__func__));
   8374 
   8375 	if (icr & ICR_LSC) {
   8376 		uint32_t reg;
   8377 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8378 
   8379 		if ((status & STATUS_LU) != 0) {
   8380 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8381 				device_xname(sc->sc_dev),
   8382 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8383 		} else {
   8384 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8385 				device_xname(sc->sc_dev)));
   8386 		}
   8387 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8388 			wm_gig_downshift_workaround_ich8lan(sc);
   8389 
   8390 		if ((sc->sc_type == WM_T_ICH8)
   8391 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8392 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8393 		}
   8394 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8395 			device_xname(sc->sc_dev)));
   8396 		mii_pollstat(&sc->sc_mii);
   8397 		if (sc->sc_type == WM_T_82543) {
   8398 			int miistatus, active;
   8399 
   8400 			/*
   8401 			 * With 82543, we need to force speed and
   8402 			 * duplex on the MAC equal to what the PHY
   8403 			 * speed and duplex configuration is.
   8404 			 */
   8405 			miistatus = sc->sc_mii.mii_media_status;
   8406 
   8407 			if (miistatus & IFM_ACTIVE) {
   8408 				active = sc->sc_mii.mii_media_active;
   8409 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8410 				switch (IFM_SUBTYPE(active)) {
   8411 				case IFM_10_T:
   8412 					sc->sc_ctrl |= CTRL_SPEED_10;
   8413 					break;
   8414 				case IFM_100_TX:
   8415 					sc->sc_ctrl |= CTRL_SPEED_100;
   8416 					break;
   8417 				case IFM_1000_T:
   8418 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8419 					break;
   8420 				default:
   8421 					/*
    8422 					 * Fiber?
    8423 					 * Should not enter here.
   8424 					 */
   8425 					printf("unknown media (%x)\n", active);
   8426 					break;
   8427 				}
   8428 				if (active & IFM_FDX)
   8429 					sc->sc_ctrl |= CTRL_FD;
   8430 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8431 			}
   8432 		} else if (sc->sc_type == WM_T_PCH) {
   8433 			wm_k1_gig_workaround_hv(sc,
   8434 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8435 		}
   8436 
   8437 		if ((sc->sc_phytype == WMPHY_82578)
   8438 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8439 			== IFM_1000_T)) {
   8440 
   8441 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8442 				delay(200*1000); /* XXX too big */
   8443 
   8444 				/* Link stall fix for link up */
   8445 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8446 				    HV_MUX_DATA_CTRL,
   8447 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8448 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8449 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8450 				    HV_MUX_DATA_CTRL,
   8451 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8452 			}
   8453 		}
    8454 		/*
    8455 		 * I217 packet loss issue:
    8456 		 * ensure that the FEXTNVM4 Beacon Duration is set correctly
    8457 		 * on power up.
    8458 		 * Set the Beacon Duration for I217 to 8 usec.
    8459 		 */
   8460 		if ((sc->sc_type == WM_T_PCH_LPT)
   8461 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8462 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8463 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8464 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8465 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8466 		}
   8467 
   8468 		/* XXX Work-around I218 hang issue */
   8469 		/* e1000_k1_workaround_lpt_lp() */
   8470 
   8471 		if ((sc->sc_type == WM_T_PCH_LPT)
   8472 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8473 			/*
   8474 			 * Set platform power management values for Latency
   8475 			 * Tolerance Reporting (LTR)
   8476 			 */
   8477 			wm_platform_pm_pch_lpt(sc,
   8478 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8479 				    != 0));
   8480 		}
   8481 
   8482 		/* FEXTNVM6 K1-off workaround */
   8483 		if (sc->sc_type == WM_T_PCH_SPT) {
   8484 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8485 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8486 			    & FEXTNVM6_K1_OFF_ENABLE)
   8487 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8488 			else
   8489 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8490 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8491 		}
   8492 	} else if (icr & ICR_RXSEQ) {
   8493 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8494 			device_xname(sc->sc_dev)));
   8495 	}
   8496 }
   8497 
   8498 /*
   8499  * wm_linkintr_tbi:
   8500  *
   8501  *	Helper; handle link interrupts for TBI mode.
   8502  */
   8503 static void
   8504 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8505 {
   8506 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8507 	uint32_t status;
   8508 
   8509 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8510 		__func__));
   8511 
   8512 	status = CSR_READ(sc, WMREG_STATUS);
   8513 	if (icr & ICR_LSC) {
   8514 		if (status & STATUS_LU) {
   8515 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8516 			    device_xname(sc->sc_dev),
   8517 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8518 			/*
   8519 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8520 			 * so we should update sc->sc_ctrl
   8521 			 */
   8522 
   8523 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8524 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8525 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8526 			if (status & STATUS_FD)
   8527 				sc->sc_tctl |=
   8528 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8529 			else
   8530 				sc->sc_tctl |=
   8531 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8532 			if (sc->sc_ctrl & CTRL_TFCE)
   8533 				sc->sc_fcrtl |= FCRTL_XONE;
   8534 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8535 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8536 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8537 				      sc->sc_fcrtl);
   8538 			sc->sc_tbi_linkup = 1;
   8539 			if_link_state_change(ifp, LINK_STATE_UP);
   8540 		} else {
   8541 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8542 			    device_xname(sc->sc_dev)));
   8543 			sc->sc_tbi_linkup = 0;
   8544 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8545 		}
   8546 		/* Update LED */
   8547 		wm_tbi_serdes_set_linkled(sc);
   8548 	} else if (icr & ICR_RXSEQ) {
   8549 		DPRINTF(WM_DEBUG_LINK,
   8550 		    ("%s: LINK: Receive sequence error\n",
   8551 		    device_xname(sc->sc_dev)));
   8552 	}
   8553 }
   8554 
   8555 /*
   8556  * wm_linkintr_serdes:
   8557  *
    8558  *	Helper; handle link interrupts for SERDES mode.
   8559  */
   8560 static void
   8561 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8562 {
   8563 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8564 	struct mii_data *mii = &sc->sc_mii;
   8565 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8566 	uint32_t pcs_adv, pcs_lpab, reg;
   8567 
   8568 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8569 		__func__));
   8570 
   8571 	if (icr & ICR_LSC) {
   8572 		/* Check PCS */
   8573 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8574 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8575 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8576 				device_xname(sc->sc_dev)));
   8577 			mii->mii_media_status |= IFM_ACTIVE;
   8578 			sc->sc_tbi_linkup = 1;
   8579 			if_link_state_change(ifp, LINK_STATE_UP);
   8580 		} else {
   8581 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8582 				device_xname(sc->sc_dev)));
   8583 			mii->mii_media_status |= IFM_NONE;
   8584 			sc->sc_tbi_linkup = 0;
   8585 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8586 			wm_tbi_serdes_set_linkled(sc);
   8587 			return;
   8588 		}
   8589 		mii->mii_media_active |= IFM_1000_SX;
   8590 		if ((reg & PCS_LSTS_FDX) != 0)
   8591 			mii->mii_media_active |= IFM_FDX;
   8592 		else
   8593 			mii->mii_media_active |= IFM_HDX;
   8594 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8595 			/* Check flow */
   8596 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8597 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8598 				DPRINTF(WM_DEBUG_LINK,
   8599 				    ("XXX LINKOK but not ACOMP\n"));
   8600 				return;
   8601 			}
   8602 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8603 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8604 			DPRINTF(WM_DEBUG_LINK,
   8605 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8606 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8607 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8608 				mii->mii_media_active |= IFM_FLOW
   8609 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8610 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8611 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8612 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8613 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8614 				mii->mii_media_active |= IFM_FLOW
   8615 				    | IFM_ETH_TXPAUSE;
   8616 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8617 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8618 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8619 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8620 				mii->mii_media_active |= IFM_FLOW
   8621 				    | IFM_ETH_RXPAUSE;
   8622 		}
   8623 		/* Update LED */
   8624 		wm_tbi_serdes_set_linkled(sc);
   8625 	} else {
   8626 		DPRINTF(WM_DEBUG_LINK,
   8627 		    ("%s: LINK: Receive sequence error\n",
   8628 		    device_xname(sc->sc_dev)));
   8629 	}
   8630 }
   8631 
   8632 /*
   8633  * wm_linkintr:
   8634  *
   8635  *	Helper; handle link interrupts.
   8636  */
   8637 static void
   8638 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8639 {
   8640 
   8641 	KASSERT(WM_CORE_LOCKED(sc));
   8642 
   8643 	if (sc->sc_flags & WM_F_HAS_MII)
   8644 		wm_linkintr_gmii(sc, icr);
   8645 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8646 	    && (sc->sc_type >= WM_T_82575))
   8647 		wm_linkintr_serdes(sc, icr);
   8648 	else
   8649 		wm_linkintr_tbi(sc, icr);
   8650 }
   8651 
   8652 /*
   8653  * wm_intr_legacy:
   8654  *
   8655  *	Interrupt service routine for INTx and MSI.
   8656  */
   8657 static int
   8658 wm_intr_legacy(void *arg)
   8659 {
   8660 	struct wm_softc *sc = arg;
   8661 	struct wm_queue *wmq = &sc->sc_queue[0];
   8662 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8663 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8664 	uint32_t icr, rndval = 0;
   8665 	int handled = 0;
   8666 
   8667 	while (1 /* CONSTCOND */) {
   8668 		icr = CSR_READ(sc, WMREG_ICR);
   8669 		if ((icr & sc->sc_icr) == 0)
   8670 			break;
   8671 		if (handled == 0) {
   8672 			DPRINTF(WM_DEBUG_TX,
   8673 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   8674 		}
   8675 		if (rndval == 0)
   8676 			rndval = icr;
   8677 
   8678 		mutex_enter(rxq->rxq_lock);
   8679 
   8680 		if (rxq->rxq_stopping) {
   8681 			mutex_exit(rxq->rxq_lock);
   8682 			break;
   8683 		}
   8684 
   8685 		handled = 1;
   8686 
   8687 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8688 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8689 			DPRINTF(WM_DEBUG_RX,
   8690 			    ("%s: RX: got Rx intr 0x%08x\n",
   8691 			    device_xname(sc->sc_dev),
   8692 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8693 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8694 		}
   8695 #endif
   8696 		wm_rxeof(rxq, UINT_MAX);
   8697 
   8698 		mutex_exit(rxq->rxq_lock);
   8699 		mutex_enter(txq->txq_lock);
   8700 
   8701 		if (txq->txq_stopping) {
   8702 			mutex_exit(txq->txq_lock);
   8703 			break;
   8704 		}
   8705 
   8706 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8707 		if (icr & ICR_TXDW) {
   8708 			DPRINTF(WM_DEBUG_TX,
   8709 			    ("%s: TX: got TXDW interrupt\n",
   8710 			    device_xname(sc->sc_dev)));
   8711 			WM_Q_EVCNT_INCR(txq, txdw);
   8712 		}
   8713 #endif
   8714 		wm_txeof(sc, txq);
   8715 
   8716 		mutex_exit(txq->txq_lock);
   8717 		WM_CORE_LOCK(sc);
   8718 
   8719 		if (sc->sc_core_stopping) {
   8720 			WM_CORE_UNLOCK(sc);
   8721 			break;
   8722 		}
   8723 
   8724 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8725 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8726 			wm_linkintr(sc, icr);
   8727 		}
   8728 
   8729 		WM_CORE_UNLOCK(sc);
   8730 
   8731 		if (icr & ICR_RXO) {
   8732 #if defined(WM_DEBUG)
   8733 			log(LOG_WARNING, "%s: Receive overrun\n",
   8734 			    device_xname(sc->sc_dev));
   8735 #endif /* defined(WM_DEBUG) */
   8736 		}
   8737 	}
   8738 
   8739 	rnd_add_uint32(&sc->rnd_source, rndval);
   8740 
   8741 	if (handled) {
   8742 		/* Try to get more packets going. */
   8743 		softint_schedule(wmq->wmq_si);
   8744 	}
   8745 
   8746 	return handled;
   8747 }
   8748 
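         /*
          * wm_txrxintr_disable / wm_txrxintr_enable:
          *
          *	Mask and unmask the Tx/Rx interrupts of a single queue pair.
          *	The register and bit layout depend on the chip: IMC/IMS on
          *	82574, EIMC/EIMS with per-queue bits on 82575, and EIMC/EIMS
          *	indexed by the MSI-X vector on everything else.
          */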
   8749 static inline void
   8750 wm_txrxintr_disable(struct wm_queue *wmq)
   8751 {
   8752 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8753 
    8754 	if (sc->sc_type == WM_T_82574)
    8755 		CSR_WRITE(sc, WMREG_IMC,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
    8756 	else if (sc->sc_type == WM_T_82575)
    8757 		CSR_WRITE(sc, WMREG_EIMC,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
    8758 	else
    8759 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8760 }
   8761 
   8762 static inline void
   8763 wm_txrxintr_enable(struct wm_queue *wmq)
   8764 {
   8765 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8766 
   8767 	wm_itrs_calculate(sc, wmq);
   8768 
    8769 	if (sc->sc_type == WM_T_82574)
    8770 		CSR_WRITE(sc, WMREG_IMS,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
    8771 	else if (sc->sc_type == WM_T_82575)
    8772 		CSR_WRITE(sc, WMREG_EIMS,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
    8773 	else
    8774 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8775 }
   8776 
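         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for the Tx/Rx interrupts of a single
          *	queue pair (MSI-X).  Masks the queue's interrupts, reaps
          *	completed transmissions and received packets, and schedules
          *	the per-queue softint, which re-enables the interrupts when
          *	it is done.
          */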
   8777 static int
   8778 wm_txrxintr_msix(void *arg)
   8779 {
   8780 	struct wm_queue *wmq = arg;
   8781 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8782 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8783 	struct wm_softc *sc = txq->txq_sc;
   8784 	u_int limit = sc->sc_rx_intr_process_limit;
   8785 
   8786 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8787 
   8788 	DPRINTF(WM_DEBUG_TX,
   8789 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8790 
   8791 	wm_txrxintr_disable(wmq);
   8792 
   8793 	mutex_enter(txq->txq_lock);
   8794 
   8795 	if (txq->txq_stopping) {
   8796 		mutex_exit(txq->txq_lock);
   8797 		return 0;
   8798 	}
   8799 
   8800 	WM_Q_EVCNT_INCR(txq, txdw);
   8801 	wm_txeof(sc, txq);
    8802 	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
   8803 	mutex_exit(txq->txq_lock);
   8804 
   8805 	DPRINTF(WM_DEBUG_RX,
   8806 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8807 	mutex_enter(rxq->rxq_lock);
   8808 
   8809 	if (rxq->rxq_stopping) {
   8810 		mutex_exit(rxq->rxq_lock);
   8811 		return 0;
   8812 	}
   8813 
   8814 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8815 	wm_rxeof(rxq, limit);
   8816 	mutex_exit(rxq->rxq_lock);
   8817 
   8818 	wm_itrs_writereg(sc, wmq);
   8819 
   8820 	softint_schedule(wmq->wmq_si);
   8821 
   8822 	return 1;
   8823 }
   8824 
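         /*
          * wm_handle_queue:
          *
          *	Per-queue softint handler.  Finishes the work started in
          *	wm_txrxintr_msix(), runs the deferred start, and re-enables
          *	the queue's interrupts.
          */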
   8825 static void
   8826 wm_handle_queue(void *arg)
   8827 {
   8828 	struct wm_queue *wmq = arg;
   8829 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8830 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8831 	struct wm_softc *sc = txq->txq_sc;
   8832 	u_int limit = sc->sc_rx_process_limit;
   8833 
   8834 	mutex_enter(txq->txq_lock);
   8835 	if (txq->txq_stopping) {
   8836 		mutex_exit(txq->txq_lock);
   8837 		return;
   8838 	}
   8839 	wm_txeof(sc, txq);
   8840 	wm_deferred_start_locked(txq);
   8841 	mutex_exit(txq->txq_lock);
   8842 
   8843 	mutex_enter(rxq->rxq_lock);
   8844 	if (rxq->rxq_stopping) {
   8845 		mutex_exit(rxq->rxq_lock);
   8846 		return;
   8847 	}
   8848 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8849 	wm_rxeof(rxq, limit);
   8850 	mutex_exit(rxq->rxq_lock);
   8851 
   8852 	wm_txrxintr_enable(wmq);
   8853 }
   8854 
   8855 /*
   8856  * wm_linkintr_msix:
   8857  *
   8858  *	Interrupt service routine for link status change for MSI-X.
   8859  */
   8860 static int
   8861 wm_linkintr_msix(void *arg)
   8862 {
   8863 	struct wm_softc *sc = arg;
   8864 	uint32_t reg;
   8865 
   8866 	DPRINTF(WM_DEBUG_LINK,
   8867 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8868 
   8869 	reg = CSR_READ(sc, WMREG_ICR);
   8870 	WM_CORE_LOCK(sc);
   8871 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8872 		goto out;
   8873 
   8874 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8875 	wm_linkintr(sc, ICR_LSC);
   8876 
   8877 out:
   8878 	WM_CORE_UNLOCK(sc);
   8879 
   8880 	if (sc->sc_type == WM_T_82574)
   8881 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8882 	else if (sc->sc_type == WM_T_82575)
   8883 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8884 	else
   8885 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8886 
   8887 	return 1;
   8888 }
   8889 
   8890 /*
   8891  * Media related.
   8892  * GMII, SGMII, TBI (and SERDES)
   8893  */
   8894 
   8895 /* Common */
   8896 
   8897 /*
   8898  * wm_tbi_serdes_set_linkled:
   8899  *
   8900  *	Update the link LED on TBI and SERDES devices.
   8901  */
   8902 static void
   8903 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8904 {
   8905 
   8906 	if (sc->sc_tbi_linkup)
   8907 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8908 	else
   8909 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8910 
   8911 	/* 82540 or newer devices are active low */
   8912 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8913 
   8914 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8915 }
   8916 
   8917 /* GMII related */
   8918 
   8919 /*
   8920  * wm_gmii_reset:
   8921  *
   8922  *	Reset the PHY.
   8923  */
   8924 static void
   8925 wm_gmii_reset(struct wm_softc *sc)
   8926 {
   8927 	uint32_t reg;
   8928 	int rv;
   8929 
   8930 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8931 		device_xname(sc->sc_dev), __func__));
   8932 
   8933 	rv = sc->phy.acquire(sc);
   8934 	if (rv != 0) {
   8935 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8936 		    __func__);
   8937 		return;
   8938 	}
   8939 
   8940 	switch (sc->sc_type) {
   8941 	case WM_T_82542_2_0:
   8942 	case WM_T_82542_2_1:
   8943 		/* null */
   8944 		break;
   8945 	case WM_T_82543:
   8946 		/*
   8947 		 * With 82543, we need to force speed and duplex on the MAC
   8948 		 * equal to what the PHY speed and duplex configuration is.
   8949 		 * In addition, we need to perform a hardware reset on the PHY
   8950 		 * to take it out of reset.
   8951 		 */
   8952 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8953 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8954 
   8955 		/* The PHY reset pin is active-low. */
   8956 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8957 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8958 		    CTRL_EXT_SWDPIN(4));
   8959 		reg |= CTRL_EXT_SWDPIO(4);
   8960 
   8961 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8962 		CSR_WRITE_FLUSH(sc);
   8963 		delay(10*1000);
   8964 
   8965 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8966 		CSR_WRITE_FLUSH(sc);
   8967 		delay(150);
   8968 #if 0
   8969 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8970 #endif
   8971 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8972 		break;
   8973 	case WM_T_82544:	/* reset 10000us */
   8974 	case WM_T_82540:
   8975 	case WM_T_82545:
   8976 	case WM_T_82545_3:
   8977 	case WM_T_82546:
   8978 	case WM_T_82546_3:
   8979 	case WM_T_82541:
   8980 	case WM_T_82541_2:
   8981 	case WM_T_82547:
   8982 	case WM_T_82547_2:
   8983 	case WM_T_82571:	/* reset 100us */
   8984 	case WM_T_82572:
   8985 	case WM_T_82573:
   8986 	case WM_T_82574:
   8987 	case WM_T_82575:
   8988 	case WM_T_82576:
   8989 	case WM_T_82580:
   8990 	case WM_T_I350:
   8991 	case WM_T_I354:
   8992 	case WM_T_I210:
   8993 	case WM_T_I211:
   8994 	case WM_T_82583:
   8995 	case WM_T_80003:
   8996 		/* generic reset */
   8997 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8998 		CSR_WRITE_FLUSH(sc);
   8999 		delay(20000);
   9000 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9001 		CSR_WRITE_FLUSH(sc);
   9002 		delay(20000);
   9003 
   9004 		if ((sc->sc_type == WM_T_82541)
   9005 		    || (sc->sc_type == WM_T_82541_2)
   9006 		    || (sc->sc_type == WM_T_82547)
   9007 		    || (sc->sc_type == WM_T_82547_2)) {
    9008 			/* Workarounds for IGP are done in igp_reset() */
   9009 			/* XXX add code to set LED after phy reset */
   9010 		}
   9011 		break;
   9012 	case WM_T_ICH8:
   9013 	case WM_T_ICH9:
   9014 	case WM_T_ICH10:
   9015 	case WM_T_PCH:
   9016 	case WM_T_PCH2:
   9017 	case WM_T_PCH_LPT:
   9018 	case WM_T_PCH_SPT:
   9019 		/* generic reset */
   9020 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9021 		CSR_WRITE_FLUSH(sc);
   9022 		delay(100);
   9023 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9024 		CSR_WRITE_FLUSH(sc);
   9025 		delay(150);
   9026 		break;
   9027 	default:
   9028 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9029 		    __func__);
   9030 		break;
   9031 	}
   9032 
   9033 	sc->phy.release(sc);
   9034 
   9035 	/* get_cfg_done */
   9036 	wm_get_cfg_done(sc);
   9037 
   9038 	/* extra setup */
   9039 	switch (sc->sc_type) {
   9040 	case WM_T_82542_2_0:
   9041 	case WM_T_82542_2_1:
   9042 	case WM_T_82543:
   9043 	case WM_T_82544:
   9044 	case WM_T_82540:
   9045 	case WM_T_82545:
   9046 	case WM_T_82545_3:
   9047 	case WM_T_82546:
   9048 	case WM_T_82546_3:
   9049 	case WM_T_82541_2:
   9050 	case WM_T_82547_2:
   9051 	case WM_T_82571:
   9052 	case WM_T_82572:
   9053 	case WM_T_82573:
   9054 	case WM_T_82574:
   9055 	case WM_T_82583:
   9056 	case WM_T_82575:
   9057 	case WM_T_82576:
   9058 	case WM_T_82580:
   9059 	case WM_T_I350:
   9060 	case WM_T_I354:
   9061 	case WM_T_I210:
   9062 	case WM_T_I211:
   9063 	case WM_T_80003:
   9064 		/* null */
   9065 		break;
   9066 	case WM_T_82541:
   9067 	case WM_T_82547:
    9068 		/* XXX Actively configure the LED after PHY reset */
   9069 		break;
   9070 	case WM_T_ICH8:
   9071 	case WM_T_ICH9:
   9072 	case WM_T_ICH10:
   9073 	case WM_T_PCH:
   9074 	case WM_T_PCH2:
   9075 	case WM_T_PCH_LPT:
   9076 	case WM_T_PCH_SPT:
   9077 		wm_phy_post_reset(sc);
   9078 		break;
   9079 	default:
   9080 		panic("%s: unknown type\n", __func__);
   9081 		break;
   9082 	}
   9083 }
   9084 
    9085 /*
    9086  * Set up sc_phytype and mii_{read|write}reg.
    9087  *
    9088  *  To identify the PHY type, the correct read/write functions must be
    9089  * selected, and to select them, the PCI ID or MAC type must be used,
    9090  * since the PHY registers cannot be accessed at that point.
    9091  *
    9092  *  On the first call of this function, the PHY ID is not known yet, so
    9093  * the PCI ID or MAC type is checked.  The list of PCI IDs may not be
    9094  * complete, so the result might be incorrect.
    9095  *
    9096  *  On the second call, the PHY OUI and model are used to identify the
    9097  * PHY type.  This may still be imperfect because of missing comparison
    9098  * entries, but it is more reliable than the first call.
    9099  *
    9100  *  If the newly detected result differs from the previous assumption,
    9101  * a diagnostic message is printed.
    9102  */
   9103 static void
   9104 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9105     uint16_t phy_model)
   9106 {
   9107 	device_t dev = sc->sc_dev;
   9108 	struct mii_data *mii = &sc->sc_mii;
   9109 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9110 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9111 	mii_readreg_t new_readreg;
   9112 	mii_writereg_t new_writereg;
   9113 
   9114 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9115 		device_xname(sc->sc_dev), __func__));
   9116 
   9117 	if (mii->mii_readreg == NULL) {
   9118 		/*
   9119 		 *  This is the first call of this function. For ICH and PCH
   9120 		 * variants, it's difficult to determine the PHY access method
   9121 		 * by sc_type, so use the PCI product ID for some devices.
   9122 		 */
   9123 
   9124 		switch (sc->sc_pcidevid) {
   9125 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9126 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9127 			/* 82577 */
   9128 			new_phytype = WMPHY_82577;
   9129 			break;
   9130 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9131 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9132 			/* 82578 */
   9133 			new_phytype = WMPHY_82578;
   9134 			break;
   9135 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9136 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9137 			/* 82579 */
   9138 			new_phytype = WMPHY_82579;
   9139 			break;
   9140 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9141 		case PCI_PRODUCT_INTEL_82801I_BM:
   9142 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9143 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9144 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9145 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9146 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9147 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9148 			/* ICH8, 9, 10 with 82567 */
   9149 			new_phytype = WMPHY_BM;
   9150 			break;
   9151 		default:
   9152 			break;
   9153 		}
   9154 	} else {
   9155 		/* It's not the first call. Use PHY OUI and model */
   9156 		switch (phy_oui) {
   9157 		case MII_OUI_ATHEROS: /* XXX ??? */
   9158 			switch (phy_model) {
   9159 			case 0x0004: /* XXX */
   9160 				new_phytype = WMPHY_82578;
   9161 				break;
   9162 			default:
   9163 				break;
   9164 			}
   9165 			break;
   9166 		case MII_OUI_xxMARVELL:
   9167 			switch (phy_model) {
   9168 			case MII_MODEL_xxMARVELL_I210:
   9169 				new_phytype = WMPHY_I210;
   9170 				break;
   9171 			case MII_MODEL_xxMARVELL_E1011:
   9172 			case MII_MODEL_xxMARVELL_E1000_3:
   9173 			case MII_MODEL_xxMARVELL_E1000_5:
   9174 			case MII_MODEL_xxMARVELL_E1112:
   9175 				new_phytype = WMPHY_M88;
   9176 				break;
   9177 			case MII_MODEL_xxMARVELL_E1149:
   9178 				new_phytype = WMPHY_BM;
   9179 				break;
   9180 			case MII_MODEL_xxMARVELL_E1111:
   9181 			case MII_MODEL_xxMARVELL_I347:
   9182 			case MII_MODEL_xxMARVELL_E1512:
   9183 			case MII_MODEL_xxMARVELL_E1340M:
   9184 			case MII_MODEL_xxMARVELL_E1543:
   9185 				new_phytype = WMPHY_M88;
   9186 				break;
   9187 			case MII_MODEL_xxMARVELL_I82563:
   9188 				new_phytype = WMPHY_GG82563;
   9189 				break;
   9190 			default:
   9191 				break;
   9192 			}
   9193 			break;
   9194 		case MII_OUI_INTEL:
   9195 			switch (phy_model) {
   9196 			case MII_MODEL_INTEL_I82577:
   9197 				new_phytype = WMPHY_82577;
   9198 				break;
   9199 			case MII_MODEL_INTEL_I82579:
   9200 				new_phytype = WMPHY_82579;
   9201 				break;
   9202 			case MII_MODEL_INTEL_I217:
   9203 				new_phytype = WMPHY_I217;
   9204 				break;
   9205 			case MII_MODEL_INTEL_I82580:
   9206 			case MII_MODEL_INTEL_I350:
   9207 				new_phytype = WMPHY_82580;
   9208 				break;
   9209 			default:
   9210 				break;
   9211 			}
   9212 			break;
   9213 		case MII_OUI_yyINTEL:
   9214 			switch (phy_model) {
   9215 			case MII_MODEL_yyINTEL_I82562G:
   9216 			case MII_MODEL_yyINTEL_I82562EM:
   9217 			case MII_MODEL_yyINTEL_I82562ET:
   9218 				new_phytype = WMPHY_IFE;
   9219 				break;
   9220 			case MII_MODEL_yyINTEL_IGP01E1000:
   9221 				new_phytype = WMPHY_IGP;
   9222 				break;
   9223 			case MII_MODEL_yyINTEL_I82566:
   9224 				new_phytype = WMPHY_IGP_3;
   9225 				break;
   9226 			default:
   9227 				break;
   9228 			}
   9229 			break;
   9230 		default:
   9231 			break;
   9232 		}
   9233 		if (new_phytype == WMPHY_UNKNOWN)
   9234 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9235 			    __func__);
   9236 
   9237 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9238 		    && (sc->sc_phytype != new_phytype)) {
    9239 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9240 			    " was incorrect. PHY type from PHY ID = %u\n",
   9241 			    sc->sc_phytype, new_phytype);
   9242 		}
   9243 	}
   9244 
   9245 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9246 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9247 		/* SGMII */
   9248 		new_readreg = wm_sgmii_readreg;
   9249 		new_writereg = wm_sgmii_writereg;
   9250 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9251 		/* BM2 (phyaddr == 1) */
   9252 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9253 		    && (new_phytype != WMPHY_BM)
   9254 		    && (new_phytype != WMPHY_UNKNOWN))
   9255 			doubt_phytype = new_phytype;
   9256 		new_phytype = WMPHY_BM;
   9257 		new_readreg = wm_gmii_bm_readreg;
   9258 		new_writereg = wm_gmii_bm_writereg;
   9259 	} else if (sc->sc_type >= WM_T_PCH) {
   9260 		/* All PCH* use _hv_ */
   9261 		new_readreg = wm_gmii_hv_readreg;
   9262 		new_writereg = wm_gmii_hv_writereg;
   9263 	} else if (sc->sc_type >= WM_T_ICH8) {
   9264 		/* non-82567 ICH8, 9 and 10 */
   9265 		new_readreg = wm_gmii_i82544_readreg;
   9266 		new_writereg = wm_gmii_i82544_writereg;
   9267 	} else if (sc->sc_type >= WM_T_80003) {
   9268 		/* 80003 */
   9269 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9270 		    && (new_phytype != WMPHY_GG82563)
   9271 		    && (new_phytype != WMPHY_UNKNOWN))
   9272 			doubt_phytype = new_phytype;
   9273 		new_phytype = WMPHY_GG82563;
   9274 		new_readreg = wm_gmii_i80003_readreg;
   9275 		new_writereg = wm_gmii_i80003_writereg;
   9276 	} else if (sc->sc_type >= WM_T_I210) {
   9277 		/* I210 and I211 */
   9278 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9279 		    && (new_phytype != WMPHY_I210)
   9280 		    && (new_phytype != WMPHY_UNKNOWN))
   9281 			doubt_phytype = new_phytype;
   9282 		new_phytype = WMPHY_I210;
   9283 		new_readreg = wm_gmii_gs40g_readreg;
   9284 		new_writereg = wm_gmii_gs40g_writereg;
   9285 	} else if (sc->sc_type >= WM_T_82580) {
   9286 		/* 82580, I350 and I354 */
   9287 		new_readreg = wm_gmii_82580_readreg;
   9288 		new_writereg = wm_gmii_82580_writereg;
   9289 	} else if (sc->sc_type >= WM_T_82544) {
    9290 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9291 		new_readreg = wm_gmii_i82544_readreg;
   9292 		new_writereg = wm_gmii_i82544_writereg;
   9293 	} else {
   9294 		new_readreg = wm_gmii_i82543_readreg;
   9295 		new_writereg = wm_gmii_i82543_writereg;
   9296 	}
   9297 
   9298 	if (new_phytype == WMPHY_BM) {
   9299 		/* All BM use _bm_ */
   9300 		new_readreg = wm_gmii_bm_readreg;
   9301 		new_writereg = wm_gmii_bm_writereg;
   9302 	}
   9303 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9304 		/* All PCH* use _hv_ */
   9305 		new_readreg = wm_gmii_hv_readreg;
   9306 		new_writereg = wm_gmii_hv_writereg;
   9307 	}
   9308 
   9309 	/* Diag output */
   9310 	if (doubt_phytype != WMPHY_UNKNOWN)
   9311 		aprint_error_dev(dev, "Assumed new PHY type was "
   9312 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9313 		    new_phytype);
   9314 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9315 	    && (sc->sc_phytype != new_phytype))
    9316 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9317 		    " was incorrect. New PHY type = %u\n",
   9318 		    sc->sc_phytype, new_phytype);
   9319 
   9320 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9321 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9322 
   9323 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9324 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9325 		    "function was incorrect.\n");
   9326 
   9327 	/* Update now */
   9328 	sc->sc_phytype = new_phytype;
   9329 	mii->mii_readreg = new_readreg;
   9330 	mii->mii_writereg = new_writereg;
   9331 }
   9332 
   9333 /*
   9334  * wm_get_phy_id_82575:
   9335  *
    9336  *	Return the PHY ID, or -1 on failure.
   9337  */
   9338 static int
   9339 wm_get_phy_id_82575(struct wm_softc *sc)
   9340 {
   9341 	uint32_t reg;
   9342 	int phyid = -1;
   9343 
   9344 	/* XXX */
   9345 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9346 		return -1;
   9347 
   9348 	if (wm_sgmii_uses_mdio(sc)) {
   9349 		switch (sc->sc_type) {
   9350 		case WM_T_82575:
   9351 		case WM_T_82576:
   9352 			reg = CSR_READ(sc, WMREG_MDIC);
   9353 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9354 			break;
   9355 		case WM_T_82580:
   9356 		case WM_T_I350:
   9357 		case WM_T_I354:
   9358 		case WM_T_I210:
   9359 		case WM_T_I211:
   9360 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9361 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9362 			break;
   9363 		default:
   9364 			return -1;
   9365 		}
   9366 	}
   9367 
   9368 	return phyid;
   9369 }
   9370 
   9371 
   9372 /*
   9373  * wm_gmii_mediainit:
   9374  *
   9375  *	Initialize media for use on 1000BASE-T devices.
   9376  */
   9377 static void
   9378 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9379 {
   9380 	device_t dev = sc->sc_dev;
   9381 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9382 	struct mii_data *mii = &sc->sc_mii;
   9383 	uint32_t reg;
   9384 
   9385 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9386 		device_xname(sc->sc_dev), __func__));
   9387 
   9388 	/* We have GMII. */
   9389 	sc->sc_flags |= WM_F_HAS_MII;
   9390 
   9391 	if (sc->sc_type == WM_T_80003)
   9392 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9393 	else
   9394 		sc->sc_tipg = TIPG_1000T_DFLT;
   9395 
   9396 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9397 	if ((sc->sc_type == WM_T_82580)
   9398 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9399 	    || (sc->sc_type == WM_T_I211)) {
   9400 		reg = CSR_READ(sc, WMREG_PHPM);
   9401 		reg &= ~PHPM_GO_LINK_D;
   9402 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9403 	}
   9404 
   9405 	/*
   9406 	 * Let the chip set speed/duplex on its own based on
   9407 	 * signals from the PHY.
   9408 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9409 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9410 	 */
   9411 	sc->sc_ctrl |= CTRL_SLU;
   9412 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9413 
   9414 	/* Initialize our media structures and probe the GMII. */
   9415 	mii->mii_ifp = ifp;
   9416 
   9417 	/*
    9418 	 * The first call of wm_gmii_setup_phytype. The result might be
   9419 	 * incorrect.
   9420 	 */
   9421 	wm_gmii_setup_phytype(sc, 0, 0);
   9422 
   9423 	mii->mii_statchg = wm_gmii_statchg;
   9424 
   9425 	/* get PHY control from SMBus to PCIe */
   9426 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9427 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9428 		wm_smbustopci(sc);
   9429 
   9430 	wm_gmii_reset(sc);
   9431 
   9432 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9433 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9434 	    wm_gmii_mediastatus);
   9435 
   9436 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9437 	    || (sc->sc_type == WM_T_82580)
   9438 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9439 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9440 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9441 			/* Attach only one port */
   9442 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9443 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9444 		} else {
   9445 			int i, id;
   9446 			uint32_t ctrl_ext;
   9447 
   9448 			id = wm_get_phy_id_82575(sc);
   9449 			if (id != -1) {
   9450 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9451 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9452 			}
   9453 			if ((id == -1)
   9454 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9455 				/* Power on the SGMII PHY if it is disabled */
    9456 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
    9457 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    9458 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   9459 				CSR_WRITE_FLUSH(sc);
   9460 				delay(300*1000); /* XXX too long */
   9461 
    9462 				/* Try PHY addresses 1 through 7 */
   9463 				for (i = 1; i < 8; i++)
   9464 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9465 					    0xffffffff, i, MII_OFFSET_ANY,
   9466 					    MIIF_DOPAUSE);
   9467 
   9468 				/* restore previous sfp cage power state */
   9469 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9470 			}
   9471 		}
   9472 	} else {
   9473 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9474 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9475 	}
   9476 
   9477 	/*
   9478 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9479 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9480 	 */
   9481 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9482 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9483 		wm_set_mdio_slow_mode_hv(sc);
   9484 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9485 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9486 	}
   9487 
   9488 	/*
   9489 	 * (For ICH8 variants)
   9490 	 * If PHY detection failed, use BM's r/w function and retry.
   9491 	 */
   9492 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9493 		/* if failed, retry with *_bm_* */
   9494 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9495 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9496 		    sc->sc_phytype);
   9497 		sc->sc_phytype = WMPHY_BM;
   9498 		mii->mii_readreg = wm_gmii_bm_readreg;
   9499 		mii->mii_writereg = wm_gmii_bm_writereg;
   9500 
   9501 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9502 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9503 	}
   9504 
   9505 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9506 		/* No PHY was found */
   9507 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9508 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9509 		sc->sc_phytype = WMPHY_NONE;
   9510 	} else {
   9511 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9512 
   9513 		/*
    9514 		 * PHY found!  Check the PHY type again by the second call
    9515 		 * of wm_gmii_setup_phytype.
   9516 		 */
   9517 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9518 		    child->mii_mpd_model);
   9519 
   9520 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9521 	}
   9522 }
   9523 
   9524 /*
   9525  * wm_gmii_mediachange:	[ifmedia interface function]
   9526  *
   9527  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9528  */
   9529 static int
   9530 wm_gmii_mediachange(struct ifnet *ifp)
   9531 {
   9532 	struct wm_softc *sc = ifp->if_softc;
   9533 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9534 	int rc;
   9535 
   9536 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9537 		device_xname(sc->sc_dev), __func__));
   9538 	if ((ifp->if_flags & IFF_UP) == 0)
   9539 		return 0;
   9540 
   9541 	/* Disable D0 LPLU. */
   9542 	wm_lplu_d0_disable(sc);
   9543 
   9544 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9545 	sc->sc_ctrl |= CTRL_SLU;
   9546 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9547 	    || (sc->sc_type > WM_T_82543)) {
   9548 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9549 	} else {
   9550 		sc->sc_ctrl &= ~CTRL_ASDE;
   9551 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9552 		if (ife->ifm_media & IFM_FDX)
   9553 			sc->sc_ctrl |= CTRL_FD;
   9554 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9555 		case IFM_10_T:
   9556 			sc->sc_ctrl |= CTRL_SPEED_10;
   9557 			break;
   9558 		case IFM_100_TX:
   9559 			sc->sc_ctrl |= CTRL_SPEED_100;
   9560 			break;
   9561 		case IFM_1000_T:
   9562 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9563 			break;
   9564 		default:
   9565 			panic("wm_gmii_mediachange: bad media 0x%x",
   9566 			    ife->ifm_media);
   9567 		}
   9568 	}
   9569 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9570 	CSR_WRITE_FLUSH(sc);
   9571 	if (sc->sc_type <= WM_T_82543)
   9572 		wm_gmii_reset(sc);
   9573 
   9574 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9575 		return 0;
   9576 	return rc;
   9577 }
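
/*
 * Worked example (a sketch, not compiled): per the forced-media branch
 * above, selecting 100baseTX full-duplex on an 82543 or older chip
 * leaves CTRL with the bits below, with "ctrl" standing in for
 * sc->sc_ctrl.
 */
#if 0
	ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD | CTRL_ASDE);
	ctrl |= CTRL_SLU | CTRL_FRCSPD | CTRL_FRCFDX | CTRL_FD
	    | CTRL_SPEED_100;
#endif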
   9578 
   9579 /*
   9580  * wm_gmii_mediastatus:	[ifmedia interface function]
   9581  *
   9582  *	Get the current interface media status on a 1000BASE-T device.
   9583  */
   9584 static void
   9585 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9586 {
   9587 	struct wm_softc *sc = ifp->if_softc;
   9588 
   9589 	ether_mediastatus(ifp, ifmr);
   9590 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9591 	    | sc->sc_flowflags;
   9592 }
   9593 
   9594 #define	MDI_IO		CTRL_SWDPIN(2)
   9595 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9596 #define	MDI_CLK		CTRL_SWDPIN(3)
   9597 
   9598 static void
   9599 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9600 {
   9601 	uint32_t i, v;
   9602 
   9603 	v = CSR_READ(sc, WMREG_CTRL);
   9604 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9605 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9606 
   9607 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9608 		if (data & i)
   9609 			v |= MDI_IO;
   9610 		else
   9611 			v &= ~MDI_IO;
   9612 		CSR_WRITE(sc, WMREG_CTRL, v);
   9613 		CSR_WRITE_FLUSH(sc);
   9614 		delay(10);
   9615 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9616 		CSR_WRITE_FLUSH(sc);
   9617 		delay(10);
   9618 		CSR_WRITE(sc, WMREG_CTRL, v);
   9619 		CSR_WRITE_FLUSH(sc);
   9620 		delay(10);
   9621 	}
   9622 }
   9623 
   9624 static uint32_t
   9625 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9626 {
   9627 	uint32_t v, i, data = 0;
   9628 
   9629 	v = CSR_READ(sc, WMREG_CTRL);
   9630 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9631 	v |= CTRL_SWDPIO(3);
   9632 
   9633 	CSR_WRITE(sc, WMREG_CTRL, v);
   9634 	CSR_WRITE_FLUSH(sc);
   9635 	delay(10);
   9636 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9637 	CSR_WRITE_FLUSH(sc);
   9638 	delay(10);
   9639 	CSR_WRITE(sc, WMREG_CTRL, v);
   9640 	CSR_WRITE_FLUSH(sc);
   9641 	delay(10);
   9642 
   9643 	for (i = 0; i < 16; i++) {
   9644 		data <<= 1;
   9645 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9646 		CSR_WRITE_FLUSH(sc);
   9647 		delay(10);
   9648 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9649 			data |= 1;
   9650 		CSR_WRITE(sc, WMREG_CTRL, v);
   9651 		CSR_WRITE_FLUSH(sc);
   9652 		delay(10);
   9653 	}
   9654 
   9655 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9656 	CSR_WRITE_FLUSH(sc);
   9657 	delay(10);
   9658 	CSR_WRITE(sc, WMREG_CTRL, v);
   9659 	CSR_WRITE_FLUSH(sc);
   9660 	delay(10);
   9661 
   9662 	return data;
   9663 }
   9664 
   9665 #undef MDI_IO
   9666 #undef MDI_DIR
   9667 #undef MDI_CLK
   9668 
   9669 /*
   9670  * wm_gmii_i82543_readreg:	[mii interface function]
   9671  *
   9672  *	Read a PHY register on the GMII (i82543 version).
   9673  */
   9674 static int
   9675 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9676 {
   9677 	struct wm_softc *sc = device_private(dev);
   9678 	int rv;
   9679 
   9680 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9681 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9682 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9683 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9684 
   9685 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9686 	    device_xname(dev), phy, reg, rv));
   9687 
   9688 	return rv;
   9689 }
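
/*
 * Sketch (not compiled): how the 14 bits sent above map onto an IEEE
 * 802.3 clause 22 read frame.  The 32-bit preamble of ones goes out
 * first; the turnaround and 16 data bits are then clocked in by
 * wm_i82543_mii_recvbits().
 */
#if 0
	/* <start:01><opcode read:10><phy addr:5 bits><reg addr:5 bits> */
	uint32_t cmd = (MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10)
	    | (phy << 5) | reg;
#endif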
   9690 
   9691 /*
   9692  * wm_gmii_i82543_writereg:	[mii interface function]
   9693  *
   9694  *	Write a PHY register on the GMII (i82543 version).
   9695  */
   9696 static void
   9697 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9698 {
   9699 	struct wm_softc *sc = device_private(dev);
   9700 
   9701 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9702 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9703 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9704 	    (MII_COMMAND_START << 30), 32);
   9705 }
   9706 
   9707 /*
   9708  * wm_gmii_mdic_readreg:	[mii interface function]
   9709  *
   9710  *	Read a PHY register on the GMII.
   9711  */
   9712 static int
   9713 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9714 {
   9715 	struct wm_softc *sc = device_private(dev);
   9716 	uint32_t mdic = 0;
   9717 	int i, rv;
   9718 
   9719 	if (reg > MII_ADDRMASK) {
   9720 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9721 		    __func__, sc->sc_phytype, reg);
   9722 		reg &= MII_ADDRMASK;
   9723 	}
   9724 
   9725 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9726 	    MDIC_REGADD(reg));
   9727 
   9728 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9729 		mdic = CSR_READ(sc, WMREG_MDIC);
   9730 		if (mdic & MDIC_READY)
   9731 			break;
   9732 		delay(50);
   9733 	}
   9734 
   9735 	if ((mdic & MDIC_READY) == 0) {
   9736 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9737 		    device_xname(dev), phy, reg);
   9738 		rv = 0;
   9739 	} else if (mdic & MDIC_E) {
   9740 #if 0 /* This is normal if no PHY is present. */
   9741 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9742 		    device_xname(dev), phy, reg);
   9743 #endif
   9744 		rv = 0;
   9745 	} else {
   9746 		rv = MDIC_DATA(mdic);
   9747 		if (rv == 0xffff)
   9748 			rv = 0;
   9749 	}
   9750 
   9751 	return rv;
   9752 }
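
/*
 * Usage sketch (not compiled; assumes a context where "dev" is the
 * wm(4) device): reading the standard clause 22 PHY identifier
 * registers from <dev/mii/mii.h> through the MDIC interface.
 */
#if 0
	int id1 = wm_gmii_mdic_readreg(dev, 1, MII_PHYIDR1);
	int id2 = wm_gmii_mdic_readreg(dev, 1, MII_PHYIDR2);
#endif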
   9753 
   9754 /*
   9755  * wm_gmii_mdic_writereg:	[mii interface function]
   9756  *
   9757  *	Write a PHY register on the GMII.
   9758  */
   9759 static void
   9760 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9761 {
   9762 	struct wm_softc *sc = device_private(dev);
   9763 	uint32_t mdic = 0;
   9764 	int i;
   9765 
   9766 	if (reg > MII_ADDRMASK) {
   9767 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9768 		    __func__, sc->sc_phytype, reg);
   9769 		reg &= MII_ADDRMASK;
   9770 	}
   9771 
   9772 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9773 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9774 
   9775 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9776 		mdic = CSR_READ(sc, WMREG_MDIC);
   9777 		if (mdic & MDIC_READY)
   9778 			break;
   9779 		delay(50);
   9780 	}
   9781 
   9782 	if ((mdic & MDIC_READY) == 0)
   9783 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9784 		    device_xname(dev), phy, reg);
   9785 	else if (mdic & MDIC_E)
   9786 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9787 		    device_xname(dev), phy, reg);
   9788 }
   9789 
   9790 /*
   9791  * wm_gmii_i82544_readreg:	[mii interface function]
   9792  *
   9793  *	Read a PHY register on the GMII.
   9794  */
   9795 static int
   9796 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9797 {
   9798 	struct wm_softc *sc = device_private(dev);
   9799 	int rv;
   9800 
   9801 	if (sc->phy.acquire(sc)) {
   9802 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9803 		return 0;
   9804 	}
   9805 
   9806 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9807 		switch (sc->sc_phytype) {
   9808 		case WMPHY_IGP:
   9809 		case WMPHY_IGP_2:
   9810 		case WMPHY_IGP_3:
   9811 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9812 			break;
   9813 		default:
   9814 #ifdef WM_DEBUG
   9815 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9816 			    __func__, sc->sc_phytype, reg);
   9817 #endif
   9818 			break;
   9819 		}
   9820 	}
   9821 
   9822 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9823 	sc->phy.release(sc);
   9824 
   9825 	return rv;
   9826 }
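
/*
 * Usage sketch (not compiled; the page and register numbers are
 * hypothetical): multi-page IGP registers carry the page in the bits
 * above the 5-bit MII address, so a caller encodes (page << 5) | reg
 * and the helper above writes the whole value to the page-select
 * register before the real access.
 */
#if 0
	int reg = (769 << 5) | 17;	/* page 769, register 17 */
	int val = wm_gmii_i82544_readreg(dev, 1, reg);
#endif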
   9827 
   9828 /*
   9829  * wm_gmii_i82544_writereg:	[mii interface function]
   9830  *
   9831  *	Write a PHY register on the GMII.
   9832  */
   9833 static void
   9834 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9835 {
   9836 	struct wm_softc *sc = device_private(dev);
   9837 
   9838 	if (sc->phy.acquire(sc)) {
   9839 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9840 		return;
   9841 	}
   9842 
   9843 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9844 		switch (sc->sc_phytype) {
   9845 		case WMPHY_IGP:
   9846 		case WMPHY_IGP_2:
   9847 		case WMPHY_IGP_3:
   9848 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9849 			break;
   9850 		default:
   9851 #ifdef WM_DEBUG
    9852 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9853 			    __func__, sc->sc_phytype, reg);
   9854 #endif
   9855 			break;
   9856 		}
   9857 	}
   9858 
   9859 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9860 	sc->phy.release(sc);
   9861 }
   9862 
   9863 /*
   9864  * wm_gmii_i80003_readreg:	[mii interface function]
   9865  *
    9866  *	Read a PHY register on the kumeran interface.
    9867  * This could be handled by the PHY layer if we didn't have to lock the
    9868  * resource ...
   9869  */
   9870 static int
   9871 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   9872 {
   9873 	struct wm_softc *sc = device_private(dev);
   9874 	int rv;
   9875 
   9876 	if (phy != 1) /* only one PHY on kumeran bus */
   9877 		return 0;
   9878 
   9879 	if (sc->phy.acquire(sc)) {
   9880 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9881 		return 0;
   9882 	}
   9883 
   9884 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9885 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT,
   9886 		    reg >> GG82563_PAGE_SHIFT);
   9887 	} else {
   9888 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9889 		    reg >> GG82563_PAGE_SHIFT);
   9890 	}
    9891 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9892 	delay(200);
   9893 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9894 	delay(200);
   9895 	sc->phy.release(sc);
   9896 
   9897 	return rv;
   9898 }
   9899 
   9900 /*
   9901  * wm_gmii_i80003_writereg:	[mii interface function]
   9902  *
    9903  *	Write a PHY register on the kumeran interface.
    9904  * This could be handled by the PHY layer if we didn't have to lock the
    9905  * resource ...
   9906  */
   9907 static void
   9908 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   9909 {
   9910 	struct wm_softc *sc = device_private(dev);
   9911 
   9912 	if (phy != 1) /* only one PHY on kumeran bus */
   9913 		return;
   9914 
   9915 	if (sc->phy.acquire(sc)) {
   9916 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9917 		return;
   9918 	}
   9919 
   9920 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9921 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT,
   9922 		    reg >> GG82563_PAGE_SHIFT);
   9923 	} else {
   9924 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9925 		    reg >> GG82563_PAGE_SHIFT);
   9926 	}
    9927 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9928 	delay(200);
   9929 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9930 	delay(200);
   9931 
   9932 	sc->phy.release(sc);
   9933 }
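
/*
 * Sketch (not compiled): GG82563 register arguments encode the page
 * above GG82563_PAGE_SHIFT.  Registers whose in-page address is below
 * GG82563_MIN_ALT_REG go through the normal page-select register and
 * the rest through the alternate one, which is what the two branches
 * above implement.
 */
#if 0
	int pagesel = ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
	    ? GG82563_PHY_PAGE_SELECT : GG82563_PHY_PAGE_SELECT_ALT;
	wm_gmii_mdic_writereg(dev, phy, pagesel, reg >> GG82563_PAGE_SHIFT);
#endif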
   9934 
   9935 /*
   9936  * wm_gmii_bm_readreg:	[mii interface function]
   9937  *
    9938  *	Read a PHY register on the BM PHY.
    9939  * This could be handled by the PHY layer if we didn't have to lock the
    9940  * resource ...
   9941  */
   9942 static int
   9943 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   9944 {
   9945 	struct wm_softc *sc = device_private(dev);
   9946 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9947 	uint16_t val;
   9948 	int rv;
   9949 
   9950 	if (sc->phy.acquire(sc)) {
   9951 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9952 		return 0;
   9953 	}
   9954 
   9955 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9956 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9957 		    || (reg == 31)) ? 1 : phy;
   9958 	/* Page 800 works differently than the rest so it has its own func */
   9959 	if (page == BM_WUC_PAGE) {
   9960 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   9961 		rv = val;
   9962 		goto release;
   9963 	}
   9964 
   9965 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9966 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9967 		    && (sc->sc_type != WM_T_82583))
   9968 			wm_gmii_mdic_writereg(dev, phy,
   9969 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9970 		else
   9971 			wm_gmii_mdic_writereg(dev, phy,
   9972 			    BME1000_PHY_PAGE_SELECT, page);
   9973 	}
   9974 
   9975 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9976 
   9977 release:
   9978 	sc->phy.release(sc);
   9979 	return rv;
   9980 }
   9981 
   9982 /*
   9983  * wm_gmii_bm_writereg:	[mii interface function]
   9984  *
    9985  *	Write a PHY register on the BM PHY.
    9986  * This could be handled by the PHY layer if we didn't have to lock the
    9987  * resource ...
   9988  */
   9989 static void
   9990 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   9991 {
   9992 	struct wm_softc *sc = device_private(dev);
   9993 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9994 
   9995 	if (sc->phy.acquire(sc)) {
   9996 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9997 		return;
   9998 	}
   9999 
   10000 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10001 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10002 		    || (reg == 31)) ? 1 : phy;
   10003 	/* Page 800 works differently than the rest so it has its own func */
   10004 	if (page == BM_WUC_PAGE) {
   10005 		uint16_t tmp;
   10006 
   10007 		tmp = val;
   10008 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10009 		goto release;
   10010 	}
   10011 
   10012 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10013 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10014 		    && (sc->sc_type != WM_T_82583))
   10015 			wm_gmii_mdic_writereg(dev, phy,
   10016 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10017 		else
   10018 			wm_gmii_mdic_writereg(dev, phy,
   10019 			    BME1000_PHY_PAGE_SELECT, page);
   10020 	}
   10021 
   10022 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10023 
   10024 release:
   10025 	sc->phy.release(sc);
   10026 }
   10027 
   10028 static void
    10029 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10030 {
   10031 	struct wm_softc *sc = device_private(dev);
   10032 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10033 	uint16_t wuce, reg;
   10034 
   10035 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10036 		device_xname(dev), __func__));
   10037 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10038 	if (sc->sc_type == WM_T_PCH) {
    10039 		/* XXX The e1000 driver does nothing here... why? */
   10040 	}
   10041 
   10042 	/*
    10043 	 * 1) Enable access to the PHY wakeup registers first.
   10044 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10045 	 */
   10046 
   10047 	/* Set page 769 */
   10048 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10049 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10050 
   10051 	/* Read WUCE and save it */
   10052 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10053 
   10054 	reg = wuce | BM_WUC_ENABLE_BIT;
   10055 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10056 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10057 
   10058 	/* Select page 800 */
   10059 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10060 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10061 
   10062 	/*
   10063 	 * 2) Access PHY wakeup register.
   10064 	 * See e1000_access_phy_wakeup_reg_bm.
   10065 	 */
   10066 
    10067 	/* Write the register number within page 800 */
   10068 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10069 
   10070 	if (rd)
   10071 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10072 	else
   10073 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10074 
   10075 	/*
    10076 	 * 3) Disable access to the PHY wakeup registers.
   10077 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10078 	 */
   10079 	/* Set page 769 */
   10080 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10081 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10082 
   10083 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10084 }
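
/*
 * Usage sketch (not compiled): the BM and HV access paths above call
 * this helper for any register whose page decodes to BM_WUC_PAGE
 * (800); rd = 1 reads into *val, rd = 0 writes *val.
 */
#if 0
	uint16_t val;

	if (BM_PHY_REG_PAGE(reg) == BM_WUC_PAGE)
		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
#endif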
   10085 
   10086 /*
   10087  * wm_gmii_hv_readreg:	[mii interface function]
   10088  *
    10089  *	Read a PHY register on the HV PHY.
    10090  * This could be handled by the PHY layer if we didn't have to lock the
    10091  * resource ...
   10092  */
   10093 static int
   10094 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10095 {
   10096 	struct wm_softc *sc = device_private(dev);
   10097 	int rv;
   10098 
   10099 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10100 		device_xname(dev), __func__));
   10101 	if (sc->phy.acquire(sc)) {
   10102 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10103 		return 0;
   10104 	}
   10105 
   10106 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10107 	sc->phy.release(sc);
   10108 	return rv;
   10109 }
   10110 
   10111 static int
   10112 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10113 {
   10114 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10115 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10116 	uint16_t val;
   10117 	int rv;
   10118 
   10119 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10120 
   10121 	/* Page 800 works differently than the rest so it has its own func */
   10122 	if (page == BM_WUC_PAGE) {
   10123 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10124 		return val;
   10125 	}
   10126 
   10127 	/*
    10128 	 * Pages lower than 768 work differently than the rest, so they
    10129 	 * would need their own function (not implemented here).
   10130 	 */
   10131 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10132 		printf("gmii_hv_readreg!!!\n");
   10133 		return 0;
   10134 	}
   10135 
   10136 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10137 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10138 		    page << BME1000_PAGE_SHIFT);
   10139 	}
   10140 
   10141 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10142 	return rv;
   10143 }
   10144 
   10145 /*
   10146  * wm_gmii_hv_writereg:	[mii interface function]
   10147  *
    10148  *	Write a PHY register on the HV PHY.
    10149  * This could be handled by the PHY layer if we didn't have to lock the
    10150  * resource ...
   10151  */
   10152 static void
   10153 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10154 {
   10155 	struct wm_softc *sc = device_private(dev);
   10156 
   10157 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10158 		device_xname(dev), __func__));
   10159 
   10160 	if (sc->phy.acquire(sc)) {
   10161 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10162 		return;
   10163 	}
   10164 
   10165 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10166 	sc->phy.release(sc);
   10167 }
   10168 
   10169 static void
   10170 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10171 {
   10172 	struct wm_softc *sc = device_private(dev);
   10173 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10174 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10175 
   10176 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10177 
   10178 	/* Page 800 works differently than the rest so it has its own func */
   10179 	if (page == BM_WUC_PAGE) {
   10180 		uint16_t tmp;
   10181 
   10182 		tmp = val;
   10183 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10184 		return;
   10185 	}
   10186 
   10187 	/*
    10188 	 * Pages lower than 768 work differently than the rest, so they
    10189 	 * would need their own function (not implemented here).
   10190 	 */
   10191 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10192 		printf("gmii_hv_writereg!!!\n");
   10193 		return;
   10194 	}
   10195 
   10196 	{
   10197 		/*
   10198 		 * XXX Workaround MDIO accesses being disabled after entering
   10199 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10200 		 * register is set)
   10201 		 */
   10202 		if (sc->sc_phytype == WMPHY_82578) {
   10203 			struct mii_softc *child;
   10204 
   10205 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10206 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10207 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10208 			    && ((val & (1 << 11)) != 0)) {
   10209 				printf("XXX need workaround\n");
   10210 			}
   10211 		}
   10212 
   10213 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10214 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10215 			    page << BME1000_PAGE_SHIFT);
   10216 		}
   10217 	}
   10218 
   10219 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10220 }
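
/*
 * Note (a sketch, not compiled): "bit 11 of the PHY control register"
 * in the workaround check above is the IEEE power-down bit, so the
 * test is equivalent to the following, assuming the MII_BMCR and
 * BMCR_PDOWN definitions from <dev/mii/mii.h>.
 */
#if 0
	if (((regnum & MII_ADDRMASK) == MII_BMCR)
	    && ((val & BMCR_PDOWN) != 0))
		;	/* the 82578 MDIO workaround would be needed here */
#endif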
   10221 
   10222 /*
   10223  * wm_gmii_82580_readreg:	[mii interface function]
   10224  *
   10225  *	Read a PHY register on the 82580 and I350.
   10226  * This could be handled by the PHY layer if we didn't have to lock the
    10227  * resource ...
   10228  */
   10229 static int
   10230 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10231 {
   10232 	struct wm_softc *sc = device_private(dev);
   10233 	int rv;
   10234 
   10235 	if (sc->phy.acquire(sc) != 0) {
   10236 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10237 		return 0;
   10238 	}
   10239 
   10240 #ifdef DIAGNOSTIC
   10241 	if (reg > MII_ADDRMASK) {
   10242 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10243 		    __func__, sc->sc_phytype, reg);
   10244 		reg &= MII_ADDRMASK;
   10245 	}
   10246 #endif
   10247 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10248 
   10249 	sc->phy.release(sc);
   10250 	return rv;
   10251 }
   10252 
   10253 /*
   10254  * wm_gmii_82580_writereg:	[mii interface function]
   10255  *
   10256  *	Write a PHY register on the 82580 and I350.
   10257  * This could be handled by the PHY layer if we didn't have to lock the
    10258  * resource ...
   10259  */
   10260 static void
   10261 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10262 {
   10263 	struct wm_softc *sc = device_private(dev);
   10264 
   10265 	if (sc->phy.acquire(sc) != 0) {
   10266 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10267 		return;
   10268 	}
   10269 
   10270 #ifdef DIAGNOSTIC
   10271 	if (reg > MII_ADDRMASK) {
   10272 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10273 		    __func__, sc->sc_phytype, reg);
   10274 		reg &= MII_ADDRMASK;
   10275 	}
   10276 #endif
   10277 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10278 
   10279 	sc->phy.release(sc);
   10280 }
   10281 
   10282 /*
   10283  * wm_gmii_gs40g_readreg:	[mii interface function]
   10284  *
    10285  *	Read a PHY register on the I210 and I211.
    10286  * This could be handled by the PHY layer if we didn't have to lock the
    10287  * resource ...
   10288  */
   10289 static int
   10290 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10291 {
   10292 	struct wm_softc *sc = device_private(dev);
   10293 	int page, offset;
   10294 	int rv;
   10295 
   10296 	/* Acquire semaphore */
   10297 	if (sc->phy.acquire(sc)) {
   10298 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10299 		return 0;
   10300 	}
   10301 
   10302 	/* Page select */
   10303 	page = reg >> GS40G_PAGE_SHIFT;
   10304 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10305 
   10306 	/* Read reg */
   10307 	offset = reg & GS40G_OFFSET_MASK;
   10308 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10309 
   10310 	sc->phy.release(sc);
   10311 	return rv;
   10312 }
   10313 
   10314 /*
   10315  * wm_gmii_gs40g_writereg:	[mii interface function]
   10316  *
   10317  *	Write a PHY register on the I210 and I211.
   10318  * This could be handled by the PHY layer if we didn't have to lock the
    10319  * resource ...
   10320  */
   10321 static void
   10322 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10323 {
   10324 	struct wm_softc *sc = device_private(dev);
   10325 	int page, offset;
   10326 
   10327 	/* Acquire semaphore */
   10328 	if (sc->phy.acquire(sc)) {
   10329 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10330 		return;
   10331 	}
   10332 
   10333 	/* Page select */
   10334 	page = reg >> GS40G_PAGE_SHIFT;
   10335 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10336 
   10337 	/* Write reg */
   10338 	offset = reg & GS40G_OFFSET_MASK;
   10339 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10340 
   10341 	/* Release semaphore */
   10342 	sc->phy.release(sc);
   10343 }
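
/*
 * Usage sketch (not compiled; the page and register numbers are
 * hypothetical): GS40G register arguments carry the page above
 * GS40G_PAGE_SHIFT and the in-page offset in GS40G_OFFSET_MASK.
 */
#if 0
	int reg = (2 << GS40G_PAGE_SHIFT) | 21;	/* page 2, register 21 */
	wm_gmii_gs40g_writereg(dev, 1, reg, 0x1234);
#endif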
   10344 
   10345 /*
   10346  * wm_gmii_statchg:	[mii interface function]
   10347  *
   10348  *	Callback from MII layer when media changes.
   10349  */
   10350 static void
   10351 wm_gmii_statchg(struct ifnet *ifp)
   10352 {
   10353 	struct wm_softc *sc = ifp->if_softc;
   10354 	struct mii_data *mii = &sc->sc_mii;
   10355 
   10356 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10357 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10358 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10359 
   10360 	/*
   10361 	 * Get flow control negotiation result.
   10362 	 */
   10363 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10364 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10365 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10366 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10367 	}
   10368 
   10369 	if (sc->sc_flowflags & IFM_FLOW) {
   10370 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10371 			sc->sc_ctrl |= CTRL_TFCE;
   10372 			sc->sc_fcrtl |= FCRTL_XONE;
   10373 		}
   10374 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10375 			sc->sc_ctrl |= CTRL_RFCE;
   10376 	}
   10377 
   10378 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10379 		DPRINTF(WM_DEBUG_LINK,
   10380 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10381 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10382 	} else {
   10383 		DPRINTF(WM_DEBUG_LINK,
   10384 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10385 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10386 	}
   10387 
   10388 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10389 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10390 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10391 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10392 	if (sc->sc_type == WM_T_80003) {
   10393 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10394 		case IFM_1000_T:
   10395 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10396 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10397 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10398 			break;
   10399 		default:
   10400 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10401 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10402 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10403 			break;
   10404 		}
   10405 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10406 	}
   10407 }
   10408 
   10409 /* kumeran related (80003, ICH* and PCH*) */
   10410 
   10411 /*
   10412  * wm_kmrn_readreg:
   10413  *
   10414  *	Read a kumeran register
   10415  */
   10416 static int
   10417 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10418 {
   10419 	int rv;
   10420 
   10421 	if (sc->sc_type == WM_T_80003)
   10422 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10423 	else
   10424 		rv = sc->phy.acquire(sc);
   10425 	if (rv != 0) {
   10426 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10427 		    __func__);
   10428 		return 0;
   10429 	}
   10430 
   10431 	rv = wm_kmrn_readreg_locked(sc, reg);
   10432 
   10433 	if (sc->sc_type == WM_T_80003)
   10434 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10435 	else
   10436 		sc->phy.release(sc);
   10437 
   10438 	return rv;
   10439 }
   10440 
   10441 static int
   10442 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10443 {
   10444 	int rv;
   10445 
   10446 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10447 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10448 	    KUMCTRLSTA_REN);
   10449 	CSR_WRITE_FLUSH(sc);
   10450 	delay(2);
   10451 
   10452 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10453 
   10454 	return rv;
   10455 }
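
/*
 * Usage sketch (not compiled): reading and writing the Kumeran
 * half-duplex control register, as wm_gmii_statchg() above does when
 * the link speed changes on an 80003.
 */
#if 0
	int hd_ctrl = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
#endif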
   10456 
   10457 /*
   10458  * wm_kmrn_writereg:
   10459  *
   10460  *	Write a kumeran register
   10461  */
   10462 static void
   10463 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10464 {
   10465 	int rv;
   10466 
   10467 	if (sc->sc_type == WM_T_80003)
   10468 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10469 	else
   10470 		rv = sc->phy.acquire(sc);
   10471 	if (rv != 0) {
   10472 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10473 		    __func__);
   10474 		return;
   10475 	}
   10476 
   10477 	wm_kmrn_writereg_locked(sc, reg, val);
   10478 
   10479 	if (sc->sc_type == WM_T_80003)
   10480 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10481 	else
   10482 		sc->phy.release(sc);
   10483 }
   10484 
   10485 static void
   10486 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10487 {
   10488 
   10489 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10490 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10491 	    (val & KUMCTRLSTA_MASK));
   10492 }
   10493 
   10494 /* SGMII related */
   10495 
   10496 /*
   10497  * wm_sgmii_uses_mdio
   10498  *
   10499  * Check whether the transaction is to the internal PHY or the external
   10500  * MDIO interface. Return true if it's MDIO.
   10501  */
   10502 static bool
   10503 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10504 {
   10505 	uint32_t reg;
   10506 	bool ismdio = false;
   10507 
   10508 	switch (sc->sc_type) {
   10509 	case WM_T_82575:
   10510 	case WM_T_82576:
   10511 		reg = CSR_READ(sc, WMREG_MDIC);
   10512 		ismdio = ((reg & MDIC_DEST) != 0);
   10513 		break;
   10514 	case WM_T_82580:
   10515 	case WM_T_I350:
   10516 	case WM_T_I354:
   10517 	case WM_T_I210:
   10518 	case WM_T_I211:
   10519 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10520 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10521 		break;
   10522 	default:
   10523 		break;
   10524 	}
   10525 
   10526 	return ismdio;
   10527 }
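
/*
 * Usage sketch (not compiled): one plausible way a caller might use
 * this predicate when picking PHY access functions; the actual attach
 * code's selection logic may differ.
 */
#if 0
	if (wm_sgmii_uses_mdio(sc)) {
		mii->mii_readreg = wm_gmii_i82544_readreg;
		mii->mii_writereg = wm_gmii_i82544_writereg;
	} else {
		mii->mii_readreg = wm_sgmii_readreg;
		mii->mii_writereg = wm_sgmii_writereg;
	}
#endif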
   10528 
   10529 /*
   10530  * wm_sgmii_readreg:	[mii interface function]
   10531  *
   10532  *	Read a PHY register on the SGMII
   10533  * This could be handled by the PHY layer if we didn't have to lock the
    10534  * resource ...
   10535  */
   10536 static int
   10537 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10538 {
   10539 	struct wm_softc *sc = device_private(dev);
   10540 	uint32_t i2ccmd;
   10541 	int i, rv;
   10542 
   10543 	if (sc->phy.acquire(sc)) {
   10544 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10545 		return 0;
   10546 	}
   10547 
   10548 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10549 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10550 	    | I2CCMD_OPCODE_READ;
   10551 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10552 
   10553 	/* Poll the ready bit */
   10554 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10555 		delay(50);
   10556 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10557 		if (i2ccmd & I2CCMD_READY)
   10558 			break;
   10559 	}
   10560 	if ((i2ccmd & I2CCMD_READY) == 0)
   10561 		device_printf(dev, "I2CCMD Read did not complete\n");
   10562 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10563 		device_printf(dev, "I2CCMD Error bit set\n");
   10564 
   10565 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10566 
   10567 	sc->phy.release(sc);
   10568 	return rv;
   10569 }
   10570 
   10571 /*
   10572  * wm_sgmii_writereg:	[mii interface function]
   10573  *
   10574  *	Write a PHY register on the SGMII.
   10575  * This could be handled by the PHY layer if we didn't have to lock the
    10576  * resource ...
   10577  */
   10578 static void
   10579 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10580 {
   10581 	struct wm_softc *sc = device_private(dev);
   10582 	uint32_t i2ccmd;
   10583 	int i;
   10584 	int val_swapped;
   10585 
   10586 	if (sc->phy.acquire(sc) != 0) {
   10587 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10588 		return;
   10589 	}
   10590 	/* Swap the data bytes for the I2C interface */
   10591 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10592 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10593 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10594 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10595 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10596 
   10597 	/* Poll the ready bit */
   10598 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10599 		delay(50);
   10600 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10601 		if (i2ccmd & I2CCMD_READY)
   10602 			break;
   10603 	}
   10604 	if ((i2ccmd & I2CCMD_READY) == 0)
   10605 		device_printf(dev, "I2CCMD Write did not complete\n");
   10606 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10607 		device_printf(dev, "I2CCMD Error bit set\n");
   10608 
   10609 	sc->phy.release(sc);
   10610 }
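
/*
 * Worked example (not compiled): the I2C byte swap above turns 0x1234
 * into 0x3412 before the write, and the read path applies the same
 * swap to restore the original value.
 */
#if 0
	int val = 0x1234;
	int val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
	/* val_swapped == 0x3412 */
#endif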
   10611 
   10612 /* TBI related */
   10613 
   10614 /*
   10615  * wm_tbi_mediainit:
   10616  *
   10617  *	Initialize media for use on 1000BASE-X devices.
   10618  */
   10619 static void
   10620 wm_tbi_mediainit(struct wm_softc *sc)
   10621 {
   10622 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10623 	const char *sep = "";
   10624 
   10625 	if (sc->sc_type < WM_T_82543)
   10626 		sc->sc_tipg = TIPG_WM_DFLT;
   10627 	else
   10628 		sc->sc_tipg = TIPG_LG_DFLT;
   10629 
   10630 	sc->sc_tbi_serdes_anegticks = 5;
   10631 
   10632 	/* Initialize our media structures */
   10633 	sc->sc_mii.mii_ifp = ifp;
   10634 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10635 
   10636 	if ((sc->sc_type >= WM_T_82575)
   10637 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10638 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10639 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10640 	else
   10641 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10642 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10643 
   10644 	/*
   10645 	 * SWD Pins:
   10646 	 *
   10647 	 *	0 = Link LED (output)
   10648 	 *	1 = Loss Of Signal (input)
   10649 	 */
   10650 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10651 
   10652 	/* XXX Perhaps this is only for TBI */
   10653 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10654 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10655 
   10656 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10657 		sc->sc_ctrl &= ~CTRL_LRST;
   10658 
   10659 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10660 
   10661 #define	ADD(ss, mm, dd)							\
   10662 do {									\
   10663 	aprint_normal("%s%s", sep, ss);					\
   10664 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10665 	sep = ", ";							\
   10666 } while (/*CONSTCOND*/0)
   10667 
   10668 	aprint_normal_dev(sc->sc_dev, "");
   10669 
   10670 	if (sc->sc_type == WM_T_I354) {
   10671 		uint32_t status;
   10672 
   10673 		status = CSR_READ(sc, WMREG_STATUS);
   10674 		if (((status & STATUS_2P5_SKU) != 0)
   10675 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10676 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   10677 		} else
   10678 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   10679 	} else if (sc->sc_type == WM_T_82545) {
   10680 		/* Only 82545 is LX (XXX except SFP) */
   10681 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10682 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10683 	} else {
   10684 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10685 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10686 	}
   10687 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10688 	aprint_normal("\n");
   10689 
   10690 #undef ADD
   10691 
   10692 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10693 }
   10694 
   10695 /*
   10696  * wm_tbi_mediachange:	[ifmedia interface function]
   10697  *
   10698  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10699  */
   10700 static int
   10701 wm_tbi_mediachange(struct ifnet *ifp)
   10702 {
   10703 	struct wm_softc *sc = ifp->if_softc;
   10704 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10705 	uint32_t status;
   10706 	int i;
   10707 
   10708 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10709 		/* XXX need some work for >= 82571 and < 82575 */
   10710 		if (sc->sc_type < WM_T_82575)
   10711 			return 0;
   10712 	}
   10713 
   10714 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10715 	    || (sc->sc_type >= WM_T_82575))
   10716 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10717 
   10718 	sc->sc_ctrl &= ~CTRL_LRST;
   10719 	sc->sc_txcw = TXCW_ANE;
   10720 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10721 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10722 	else if (ife->ifm_media & IFM_FDX)
   10723 		sc->sc_txcw |= TXCW_FD;
   10724 	else
   10725 		sc->sc_txcw |= TXCW_HD;
   10726 
   10727 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10728 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10729 
   10730 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10731 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10732 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10733 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10734 	CSR_WRITE_FLUSH(sc);
   10735 	delay(1000);
   10736 
   10737 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10738 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10739 
   10740 	/*
    10741 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    10742 	 * if the optics detect a signal; on older chips it reads as 0.
   10743 	 */
   10744 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10745 		/* Have signal; wait for the link to come up. */
   10746 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10747 			delay(10000);
   10748 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10749 				break;
   10750 		}
   10751 
   10752 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10753 			    device_xname(sc->sc_dev),i));
   10754 
   10755 		status = CSR_READ(sc, WMREG_STATUS);
   10756 		DPRINTF(WM_DEBUG_LINK,
   10757 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10758 			device_xname(sc->sc_dev),status, STATUS_LU));
   10759 		if (status & STATUS_LU) {
   10760 			/* Link is up. */
   10761 			DPRINTF(WM_DEBUG_LINK,
   10762 			    ("%s: LINK: set media -> link up %s\n",
   10763 			    device_xname(sc->sc_dev),
   10764 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10765 
   10766 			/*
    10767 			 * NOTE: the hardware updates CTRL's TFCE and RFCE
    10768 			 * bits automatically, so we should re-read sc->sc_ctrl
   10769 			 */
   10770 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10771 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10772 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10773 			if (status & STATUS_FD)
   10774 				sc->sc_tctl |=
   10775 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10776 			else
   10777 				sc->sc_tctl |=
   10778 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10779 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10780 				sc->sc_fcrtl |= FCRTL_XONE;
   10781 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10782 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10783 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10784 				      sc->sc_fcrtl);
   10785 			sc->sc_tbi_linkup = 1;
   10786 		} else {
   10787 			if (i == WM_LINKUP_TIMEOUT)
   10788 				wm_check_for_link(sc);
   10789 			/* Link is down. */
   10790 			DPRINTF(WM_DEBUG_LINK,
   10791 			    ("%s: LINK: set media -> link down\n",
   10792 			    device_xname(sc->sc_dev)));
   10793 			sc->sc_tbi_linkup = 0;
   10794 		}
   10795 	} else {
   10796 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10797 		    device_xname(sc->sc_dev)));
   10798 		sc->sc_tbi_linkup = 0;
   10799 	}
   10800 
   10801 	wm_tbi_serdes_set_linkled(sc);
   10802 
   10803 	return 0;
   10804 }
   10805 
   10806 /*
   10807  * wm_tbi_mediastatus:	[ifmedia interface function]
   10808  *
   10809  *	Get the current interface media status on a 1000BASE-X device.
   10810  */
   10811 static void
   10812 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10813 {
   10814 	struct wm_softc *sc = ifp->if_softc;
   10815 	uint32_t ctrl, status;
   10816 
   10817 	ifmr->ifm_status = IFM_AVALID;
   10818 	ifmr->ifm_active = IFM_ETHER;
   10819 
   10820 	status = CSR_READ(sc, WMREG_STATUS);
   10821 	if ((status & STATUS_LU) == 0) {
   10822 		ifmr->ifm_active |= IFM_NONE;
   10823 		return;
   10824 	}
   10825 
   10826 	ifmr->ifm_status |= IFM_ACTIVE;
   10827 	/* Only 82545 is LX */
   10828 	if (sc->sc_type == WM_T_82545)
   10829 		ifmr->ifm_active |= IFM_1000_LX;
   10830 	else
   10831 		ifmr->ifm_active |= IFM_1000_SX;
   10832 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10833 		ifmr->ifm_active |= IFM_FDX;
   10834 	else
   10835 		ifmr->ifm_active |= IFM_HDX;
   10836 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10837 	if (ctrl & CTRL_RFCE)
   10838 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10839 	if (ctrl & CTRL_TFCE)
   10840 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10841 }
   10842 
   10843 /* XXX TBI only */
   10844 static int
   10845 wm_check_for_link(struct wm_softc *sc)
   10846 {
   10847 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10848 	uint32_t rxcw;
   10849 	uint32_t ctrl;
   10850 	uint32_t status;
   10851 	uint32_t sig;
   10852 
   10853 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10854 		/* XXX need some work for >= 82571 */
   10855 		if (sc->sc_type >= WM_T_82571) {
   10856 			sc->sc_tbi_linkup = 1;
   10857 			return 0;
   10858 		}
   10859 	}
   10860 
   10861 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10862 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10863 	status = CSR_READ(sc, WMREG_STATUS);
   10864 
   10865 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10866 
   10867 	DPRINTF(WM_DEBUG_LINK,
   10868 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10869 		device_xname(sc->sc_dev), __func__,
   10870 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10871 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10872 
   10873 	/*
   10874 	 * SWDPIN   LU RXCW
   10875 	 *      0    0    0
   10876 	 *      0    0    1	(should not happen)
   10877 	 *      0    1    0	(should not happen)
   10878 	 *      0    1    1	(should not happen)
   10879 	 *      1    0    0	Disable autonego and force linkup
   10880 	 *      1    0    1	got /C/ but not linkup yet
   10881 	 *      1    1    0	(linkup)
   10882 	 *      1    1    1	If IFM_AUTO, back to autonego
   10883 	 *
   10884 	 */
   10885 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10886 	    && ((status & STATUS_LU) == 0)
   10887 	    && ((rxcw & RXCW_C) == 0)) {
   10888 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10889 			__func__));
   10890 		sc->sc_tbi_linkup = 0;
   10891 		/* Disable auto-negotiation in the TXCW register */
   10892 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10893 
   10894 		/*
   10895 		 * Force link-up and also force full-duplex.
   10896 		 *
    10897 		 * NOTE: the hardware updated CTRL's TFCE and RFCE bits
    10898 		 * automatically, so we should update sc->sc_ctrl from it
   10899 		 */
   10900 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10901 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10902 	} else if (((status & STATUS_LU) != 0)
   10903 	    && ((rxcw & RXCW_C) != 0)
   10904 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10905 		sc->sc_tbi_linkup = 1;
   10906 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10907 			__func__));
   10908 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10909 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10910 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10911 	    && ((rxcw & RXCW_C) != 0)) {
   10912 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10913 	} else {
   10914 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10915 			status));
   10916 	}
   10917 
   10918 	return 0;
   10919 }
   10920 
   10921 /*
   10922  * wm_tbi_tick:
   10923  *
   10924  *	Check the link on TBI devices.
   10925  *	This function acts as mii_tick().
   10926  */
   10927 static void
   10928 wm_tbi_tick(struct wm_softc *sc)
   10929 {
   10930 	struct mii_data *mii = &sc->sc_mii;
   10931 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10932 	uint32_t status;
   10933 
   10934 	KASSERT(WM_CORE_LOCKED(sc));
   10935 
   10936 	status = CSR_READ(sc, WMREG_STATUS);
   10937 
   10938 	/* XXX is this needed? */
   10939 	(void)CSR_READ(sc, WMREG_RXCW);
   10940 	(void)CSR_READ(sc, WMREG_CTRL);
   10941 
   10942 	/* set link status */
   10943 	if ((status & STATUS_LU) == 0) {
   10944 		DPRINTF(WM_DEBUG_LINK,
   10945 		    ("%s: LINK: checklink -> down\n",
   10946 			device_xname(sc->sc_dev)));
   10947 		sc->sc_tbi_linkup = 0;
   10948 	} else if (sc->sc_tbi_linkup == 0) {
   10949 		DPRINTF(WM_DEBUG_LINK,
   10950 		    ("%s: LINK: checklink -> up %s\n",
   10951 			device_xname(sc->sc_dev),
   10952 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10953 		sc->sc_tbi_linkup = 1;
   10954 		sc->sc_tbi_serdes_ticks = 0;
   10955 	}
   10956 
   10957 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10958 		goto setled;
   10959 
   10960 	if ((status & STATUS_LU) == 0) {
   10961 		sc->sc_tbi_linkup = 0;
   10962 		/* If the timer expired, retry autonegotiation */
   10963 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10964 		    && (++sc->sc_tbi_serdes_ticks
   10965 			>= sc->sc_tbi_serdes_anegticks)) {
   10966 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10967 			sc->sc_tbi_serdes_ticks = 0;
   10968 			/*
   10969 			 * Reset the link, and let autonegotiation do
   10970 			 * its thing
   10971 			 */
   10972 			sc->sc_ctrl |= CTRL_LRST;
   10973 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10974 			CSR_WRITE_FLUSH(sc);
   10975 			delay(1000);
   10976 			sc->sc_ctrl &= ~CTRL_LRST;
   10977 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10978 			CSR_WRITE_FLUSH(sc);
   10979 			delay(1000);
   10980 			CSR_WRITE(sc, WMREG_TXCW,
   10981 			    sc->sc_txcw & ~TXCW_ANE);
   10982 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10983 		}
   10984 	}
   10985 
   10986 setled:
   10987 	wm_tbi_serdes_set_linkled(sc);
   10988 }
   10989 
   10990 /* SERDES related */
   10991 static void
   10992 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10993 {
   10994 	uint32_t reg;
   10995 
   10996 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10997 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10998 		return;
   10999 
   11000 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11001 	reg |= PCS_CFG_PCS_EN;
   11002 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11003 
   11004 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11005 	reg &= ~CTRL_EXT_SWDPIN(3);
   11006 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11007 	CSR_WRITE_FLUSH(sc);
   11008 }
   11009 
   11010 static int
   11011 wm_serdes_mediachange(struct ifnet *ifp)
   11012 {
   11013 	struct wm_softc *sc = ifp->if_softc;
   11014 	bool pcs_autoneg = true; /* XXX */
   11015 	uint32_t ctrl_ext, pcs_lctl, reg;
   11016 
   11017 	/* XXX Currently, this function is not called on 8257[12] */
   11018 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11019 	    || (sc->sc_type >= WM_T_82575))
   11020 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11021 
   11022 	wm_serdes_power_up_link_82575(sc);
   11023 
   11024 	sc->sc_ctrl |= CTRL_SLU;
   11025 
   11026 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11027 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11028 
   11029 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11030 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11031 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11032 	case CTRL_EXT_LINK_MODE_SGMII:
   11033 		pcs_autoneg = true;
   11034 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11035 		break;
   11036 	case CTRL_EXT_LINK_MODE_1000KX:
   11037 		pcs_autoneg = false;
   11038 		/* FALLTHROUGH */
   11039 	default:
   11040 		if ((sc->sc_type == WM_T_82575)
   11041 		    || (sc->sc_type == WM_T_82576)) {
   11042 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11043 				pcs_autoneg = false;
   11044 		}
   11045 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11046 		    | CTRL_FRCFDX;
   11047 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11048 	}
   11049 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11050 
   11051 	if (pcs_autoneg) {
   11052 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11053 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11054 
   11055 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11056 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11057 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11058 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11059 	} else
   11060 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11061 
   11062 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    11063 
   11065 	return 0;
   11066 }
   11067 
   11068 static void
   11069 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11070 {
   11071 	struct wm_softc *sc = ifp->if_softc;
   11072 	struct mii_data *mii = &sc->sc_mii;
   11073 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11074 	uint32_t pcs_adv, pcs_lpab, reg;
   11075 
   11076 	ifmr->ifm_status = IFM_AVALID;
   11077 	ifmr->ifm_active = IFM_ETHER;
   11078 
   11079 	/* Check PCS */
   11080 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11081 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11082 		ifmr->ifm_active |= IFM_NONE;
   11083 		sc->sc_tbi_linkup = 0;
   11084 		goto setled;
   11085 	}
   11086 
   11087 	sc->sc_tbi_linkup = 1;
   11088 	ifmr->ifm_status |= IFM_ACTIVE;
   11089 	if (sc->sc_type == WM_T_I354) {
   11090 		uint32_t status;
   11091 
   11092 		status = CSR_READ(sc, WMREG_STATUS);
   11093 		if (((status & STATUS_2P5_SKU) != 0)
   11094 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11095 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11096 		} else
   11097 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11098 	} else {
   11099 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11100 		case PCS_LSTS_SPEED_10:
   11101 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11102 			break;
   11103 		case PCS_LSTS_SPEED_100:
   11104 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11105 			break;
   11106 		case PCS_LSTS_SPEED_1000:
   11107 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11108 			break;
   11109 		default:
   11110 			device_printf(sc->sc_dev, "Unknown speed\n");
   11111 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11112 			break;
   11113 		}
   11114 	}
   11115 	if ((reg & PCS_LSTS_FDX) != 0)
   11116 		ifmr->ifm_active |= IFM_FDX;
   11117 	else
   11118 		ifmr->ifm_active |= IFM_HDX;
   11119 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11120 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11121 		/* Check flow */
   11122 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11123 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11124 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11125 			goto setled;
   11126 		}
   11127 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11128 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11129 		DPRINTF(WM_DEBUG_LINK,
   11130 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11131 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11132 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11133 			mii->mii_media_active |= IFM_FLOW
   11134 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11135 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11136 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11137 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11138 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11139 			mii->mii_media_active |= IFM_FLOW
   11140 			    | IFM_ETH_TXPAUSE;
   11141 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11142 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11143 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11144 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11145 			mii->mii_media_active |= IFM_FLOW
   11146 			    | IFM_ETH_RXPAUSE;
   11147 		}
   11148 	}
   11149 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11150 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11151 setled:
   11152 	wm_tbi_serdes_set_linkled(sc);
   11153 }
   11154 
   11155 /*
   11156  * wm_serdes_tick:
   11157  *
   11158  *	Check the link on serdes devices.
   11159  */
   11160 static void
   11161 wm_serdes_tick(struct wm_softc *sc)
   11162 {
   11163 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11164 	struct mii_data *mii = &sc->sc_mii;
   11165 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11166 	uint32_t reg;
   11167 
   11168 	KASSERT(WM_CORE_LOCKED(sc));
   11169 
   11170 	mii->mii_media_status = IFM_AVALID;
   11171 	mii->mii_media_active = IFM_ETHER;
   11172 
   11173 	/* Check PCS */
   11174 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11175 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11176 		mii->mii_media_status |= IFM_ACTIVE;
   11177 		sc->sc_tbi_linkup = 1;
   11178 		sc->sc_tbi_serdes_ticks = 0;
   11179 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11180 		if ((reg & PCS_LSTS_FDX) != 0)
   11181 			mii->mii_media_active |= IFM_FDX;
   11182 		else
   11183 			mii->mii_media_active |= IFM_HDX;
   11184 	} else {
    11185 		mii->mii_media_active |= IFM_NONE;
   11186 		sc->sc_tbi_linkup = 0;
   11187 		/* If the timer expired, retry autonegotiation */
   11188 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11189 		    && (++sc->sc_tbi_serdes_ticks
   11190 			>= sc->sc_tbi_serdes_anegticks)) {
   11191 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11192 			sc->sc_tbi_serdes_ticks = 0;
   11193 			/* XXX */
   11194 			wm_serdes_mediachange(ifp);
   11195 		}
   11196 	}
   11197 
   11198 	wm_tbi_serdes_set_linkled(sc);
   11199 }
   11200 
   11201 /* SFP related */
   11202 
   11203 static int
   11204 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11205 {
   11206 	uint32_t i2ccmd;
   11207 	int i;
   11208 
   11209 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11210 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11211 
   11212 	/* Poll the ready bit */
   11213 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11214 		delay(50);
   11215 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11216 		if (i2ccmd & I2CCMD_READY)
   11217 			break;
   11218 	}
   11219 	if ((i2ccmd & I2CCMD_READY) == 0)
   11220 		return -1;
   11221 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11222 		return -1;
   11223 
   11224 	*data = i2ccmd & 0x00ff;
   11225 
   11226 	return 0;
   11227 }
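
/*
 * Usage sketch (not compiled): reading the SFP identifier byte, which
 * is what wm_sfp_get_media_type() below does with retries.
 */
#if 0
	uint8_t id;

	if (wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &id) == 0)
		;	/* id == SFF_SFP_ID_SFP for a pluggable module */
#endif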
   11228 
   11229 static uint32_t
   11230 wm_sfp_get_media_type(struct wm_softc *sc)
   11231 {
   11232 	uint32_t ctrl_ext;
   11233 	uint8_t val = 0;
   11234 	int timeout = 3;
   11235 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11236 	int rv = -1;
   11237 
   11238 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11239 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11240 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11241 	CSR_WRITE_FLUSH(sc);
   11242 
   11243 	/* Read SFP module data */
   11244 	while (timeout) {
   11245 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11246 		if (rv == 0)
   11247 			break;
   11248 		delay(100*1000); /* XXX too big */
   11249 		timeout--;
   11250 	}
   11251 	if (rv != 0)
   11252 		goto out;
   11253 	switch (val) {
   11254 	case SFF_SFP_ID_SFF:
   11255 		aprint_normal_dev(sc->sc_dev,
   11256 		    "Module/Connector soldered to board\n");
   11257 		break;
   11258 	case SFF_SFP_ID_SFP:
   11259 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11260 		break;
   11261 	case SFF_SFP_ID_UNKNOWN:
   11262 		goto out;
   11263 	default:
   11264 		break;
   11265 	}
   11266 
   11267 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11268 	if (rv != 0) {
   11269 		goto out;
   11270 	}
   11271 
   11272 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11273 		mediatype = WM_MEDIATYPE_SERDES;
    11274 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11275 		sc->sc_flags |= WM_F_SGMII;
   11276 		mediatype = WM_MEDIATYPE_COPPER;
    11277 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11278 		sc->sc_flags |= WM_F_SGMII;
   11279 		mediatype = WM_MEDIATYPE_SERDES;
   11280 	}
   11281 
   11282 out:
   11283 	/* Restore I2C interface setting */
   11284 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11285 
   11286 	return mediatype;
   11287 }
   11288 
   11289 /*
   11290  * NVM related.
    11291  * Microwire, SPI (with/without EERD) and Flash.
   11292  */
   11293 
    11294 /* Both SPI and uwire */
   11295 
   11296 /*
   11297  * wm_eeprom_sendbits:
   11298  *
   11299  *	Send a series of bits to the EEPROM.
   11300  */
   11301 static void
   11302 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11303 {
   11304 	uint32_t reg;
   11305 	int x;
   11306 
   11307 	reg = CSR_READ(sc, WMREG_EECD);
   11308 
   11309 	for (x = nbits; x > 0; x--) {
   11310 		if (bits & (1U << (x - 1)))
   11311 			reg |= EECD_DI;
   11312 		else
   11313 			reg &= ~EECD_DI;
   11314 		CSR_WRITE(sc, WMREG_EECD, reg);
   11315 		CSR_WRITE_FLUSH(sc);
   11316 		delay(2);
   11317 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11318 		CSR_WRITE_FLUSH(sc);
   11319 		delay(2);
   11320 		CSR_WRITE(sc, WMREG_EECD, reg);
   11321 		CSR_WRITE_FLUSH(sc);
   11322 		delay(2);
   11323 	}
   11324 }
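
/*
 * Timing sketch of the bit-bang above, assuming UWIRE_OPC_READ is the
 * conventional 110b: each bit is placed on DI, then SK is pulsed high
 * and low again, MSB first:
 *
 *	DI:  1_____1_____0____
 *	SK:  _/\____/\____/\__	(one SK pulse per bit)
 */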
   11325 
   11326 /*
   11327  * wm_eeprom_recvbits:
   11328  *
   11329  *	Receive a series of bits from the EEPROM.
   11330  */
   11331 static void
   11332 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11333 {
   11334 	uint32_t reg, val;
   11335 	int x;
   11336 
   11337 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11338 
   11339 	val = 0;
   11340 	for (x = nbits; x > 0; x--) {
   11341 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11342 		CSR_WRITE_FLUSH(sc);
   11343 		delay(2);
   11344 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11345 			val |= (1U << (x - 1));
   11346 		CSR_WRITE(sc, WMREG_EECD, reg);
   11347 		CSR_WRITE_FLUSH(sc);
   11348 		delay(2);
   11349 	}
   11350 	*valp = val;
   11351 }
   11352 
   11353 /* Microwire */
   11354 
   11355 /*
   11356  * wm_nvm_read_uwire:
   11357  *
   11358  *	Read a word from the EEPROM using the MicroWire protocol.
   11359  */
   11360 static int
   11361 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11362 {
   11363 	uint32_t reg, val;
   11364 	int i;
   11365 
   11366 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11367 		device_xname(sc->sc_dev), __func__));
   11368 
   11369 	for (i = 0; i < wordcnt; i++) {
   11370 		/* Clear SK and DI. */
   11371 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11372 		CSR_WRITE(sc, WMREG_EECD, reg);
   11373 
   11374 		/*
   11375 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11376 		 * and Xen.
   11377 		 *
   11378 		 * We use this workaround only for 82540 because qemu's
    11379 		 * e1000 acts as an 82540.
   11380 		 */
   11381 		if (sc->sc_type == WM_T_82540) {
   11382 			reg |= EECD_SK;
   11383 			CSR_WRITE(sc, WMREG_EECD, reg);
   11384 			reg &= ~EECD_SK;
   11385 			CSR_WRITE(sc, WMREG_EECD, reg);
   11386 			CSR_WRITE_FLUSH(sc);
   11387 			delay(2);
   11388 		}
   11389 		/* XXX: end of workaround */
   11390 
   11391 		/* Set CHIP SELECT. */
   11392 		reg |= EECD_CS;
   11393 		CSR_WRITE(sc, WMREG_EECD, reg);
   11394 		CSR_WRITE_FLUSH(sc);
   11395 		delay(2);
   11396 
   11397 		/* Shift in the READ command. */
   11398 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11399 
   11400 		/* Shift in address. */
   11401 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11402 
   11403 		/* Shift out the data. */
   11404 		wm_eeprom_recvbits(sc, &val, 16);
   11405 		data[i] = val & 0xffff;
   11406 
   11407 		/* Clear CHIP SELECT. */
   11408 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11409 		CSR_WRITE(sc, WMREG_EECD, reg);
   11410 		CSR_WRITE_FLUSH(sc);
   11411 		delay(2);
   11412 	}
   11413 
   11414 	return 0;
   11415 }
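
/*
 * Shape of one complete Microwire read frame, as driven above (sketch):
 *
 *	CS high -> READ opcode (3 bits) -> address (sc_nvm_addrbits bits)
 *		-> 16 data bits shifted out on DO -> CS low
 *
 * e.g. wm_nvm_read_uwire(sc, 0, 3, buf) fetches words 0..2, which
 * typically hold the Ethernet address, one frame per word.
 */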
   11416 
   11417 /* SPI */
   11418 
   11419 /*
   11420  * Set SPI and FLASH related information from the EECD register.
   11421  * For 82541 and 82547, the word size is taken from EEPROM.
   11422  */
   11423 static int
   11424 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11425 {
   11426 	int size;
   11427 	uint32_t reg;
   11428 	uint16_t data;
   11429 
   11430 	reg = CSR_READ(sc, WMREG_EECD);
   11431 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11432 
   11433 	/* Read the size of NVM from EECD by default */
   11434 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11435 	switch (sc->sc_type) {
   11436 	case WM_T_82541:
   11437 	case WM_T_82541_2:
   11438 	case WM_T_82547:
   11439 	case WM_T_82547_2:
   11440 		/* Set dummy value to access EEPROM */
   11441 		sc->sc_nvm_wordsize = 64;
   11442 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11443 		reg = data;
   11444 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11445 		if (size == 0)
   11446 			size = 6; /* 64 word size */
   11447 		else
   11448 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11449 		break;
   11450 	case WM_T_80003:
   11451 	case WM_T_82571:
   11452 	case WM_T_82572:
   11453 	case WM_T_82573: /* SPI case */
   11454 	case WM_T_82574: /* SPI case */
   11455 	case WM_T_82583: /* SPI case */
   11456 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11457 		if (size > 14)
   11458 			size = 14;
   11459 		break;
   11460 	case WM_T_82575:
   11461 	case WM_T_82576:
   11462 	case WM_T_82580:
   11463 	case WM_T_I350:
   11464 	case WM_T_I354:
   11465 	case WM_T_I210:
   11466 	case WM_T_I211:
   11467 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11468 		if (size > 15)
   11469 			size = 15;
   11470 		break;
   11471 	default:
   11472 		aprint_error_dev(sc->sc_dev,
    11473 		    "%s: unknown device (%d)?\n", __func__, sc->sc_type);
   11474 		return -1;
   11475 		break;
   11476 	}
   11477 
   11478 	sc->sc_nvm_wordsize = 1 << size;
   11479 
   11480 	return 0;
   11481 }
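
/*
 * Worked example of the size decode above (sketch): on an 82571 whose
 * EECD size field reads 2, the exponent becomes
 * 2 + NVM_WORD_SIZE_BASE_SHIFT (6) = 8, so sc_nvm_wordsize ends up as
 * 1 << 8 = 256 words (512 bytes).
 */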
   11482 
   11483 /*
   11484  * wm_nvm_ready_spi:
   11485  *
   11486  *	Wait for a SPI EEPROM to be ready for commands.
   11487  */
   11488 static int
   11489 wm_nvm_ready_spi(struct wm_softc *sc)
   11490 {
   11491 	uint32_t val;
   11492 	int usec;
   11493 
   11494 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11495 		device_xname(sc->sc_dev), __func__));
   11496 
   11497 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11498 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11499 		wm_eeprom_recvbits(sc, &val, 8);
   11500 		if ((val & SPI_SR_RDY) == 0)
   11501 			break;
   11502 	}
   11503 	if (usec >= SPI_MAX_RETRIES) {
    11504 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11505 		return 1;
   11506 	}
   11507 	return 0;
   11508 }
   11509 
   11510 /*
   11511  * wm_nvm_read_spi:
   11512  *
    11513  *	Read a word from the EEPROM using the SPI protocol.
   11514  */
   11515 static int
   11516 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11517 {
   11518 	uint32_t reg, val;
   11519 	int i;
   11520 	uint8_t opc;
   11521 
   11522 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11523 		device_xname(sc->sc_dev), __func__));
   11524 
   11525 	/* Clear SK and CS. */
   11526 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11527 	CSR_WRITE(sc, WMREG_EECD, reg);
   11528 	CSR_WRITE_FLUSH(sc);
   11529 	delay(2);
   11530 
   11531 	if (wm_nvm_ready_spi(sc))
   11532 		return 1;
   11533 
   11534 	/* Toggle CS to flush commands. */
   11535 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11536 	CSR_WRITE_FLUSH(sc);
   11537 	delay(2);
   11538 	CSR_WRITE(sc, WMREG_EECD, reg);
   11539 	CSR_WRITE_FLUSH(sc);
   11540 	delay(2);
   11541 
   11542 	opc = SPI_OPC_READ;
   11543 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11544 		opc |= SPI_OPC_A8;
   11545 
   11546 	wm_eeprom_sendbits(sc, opc, 8);
   11547 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11548 
   11549 	for (i = 0; i < wordcnt; i++) {
   11550 		wm_eeprom_recvbits(sc, &val, 16);
   11551 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11552 	}
   11553 
   11554 	/* Raise CS and clear SK. */
   11555 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11556 	CSR_WRITE(sc, WMREG_EECD, reg);
   11557 	CSR_WRITE_FLUSH(sc);
   11558 	delay(2);
   11559 
   11560 	return 0;
   11561 }
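
/*
 * Note on the byte swap above: the 16 bits arrive from the SPI EEPROM in
 * the opposite byte order from what the driver stores, so a received
 * value of 0xAABB becomes data[i] = 0xBBAA.
 */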
   11562 
    11563 /* Reading via EERD */
   11564 
   11565 static int
   11566 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11567 {
   11568 	uint32_t attempts = 100000;
   11569 	uint32_t i, reg = 0;
   11570 	int32_t done = -1;
   11571 
   11572 	for (i = 0; i < attempts; i++) {
   11573 		reg = CSR_READ(sc, rw);
   11574 
   11575 		if (reg & EERD_DONE) {
   11576 			done = 0;
   11577 			break;
   11578 		}
   11579 		delay(5);
   11580 	}
   11581 
   11582 	return done;
   11583 }
   11584 
   11585 static int
   11586 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11587     uint16_t *data)
   11588 {
   11589 	int i, eerd = 0;
   11590 	int error = 0;
   11591 
   11592 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11593 		device_xname(sc->sc_dev), __func__));
   11594 
   11595 	for (i = 0; i < wordcnt; i++) {
   11596 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11597 
   11598 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11599 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11600 		if (error != 0)
   11601 			break;
   11602 
   11603 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11604 	}
   11605 
   11606 	return error;
   11607 }
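
/*
 * Shape of one EERD transaction above (sketch): to read word 0x10,
 *
 *	eerd = (0x10 << EERD_ADDR_SHIFT) | EERD_START;
 *
 * is written to WMREG_EERD, EERD_DONE is polled, and the result is taken
 * from the data field at EERD_DATA_SHIFT of the same register.
 */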
   11608 
   11609 /* Flash */
   11610 
   11611 static int
   11612 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11613 {
   11614 	uint32_t eecd;
   11615 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11616 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11617 	uint8_t sig_byte = 0;
   11618 
   11619 	switch (sc->sc_type) {
   11620 	case WM_T_PCH_SPT:
   11621 		/*
   11622 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11623 		 * sector valid bits from the NVM.
   11624 		 */
   11625 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11626 		if ((*bank == 0) || (*bank == 1)) {
   11627 			aprint_error_dev(sc->sc_dev,
   11628 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11629 				*bank);
   11630 			return -1;
   11631 		} else {
   11632 			*bank = *bank - 2;
   11633 			return 0;
   11634 		}
   11635 	case WM_T_ICH8:
   11636 	case WM_T_ICH9:
   11637 		eecd = CSR_READ(sc, WMREG_EECD);
   11638 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11639 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11640 			return 0;
   11641 		}
   11642 		/* FALLTHROUGH */
   11643 	default:
   11644 		/* Default to 0 */
   11645 		*bank = 0;
   11646 
   11647 		/* Check bank 0 */
   11648 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11649 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11650 			*bank = 0;
   11651 			return 0;
   11652 		}
   11653 
   11654 		/* Check bank 1 */
   11655 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11656 		    &sig_byte);
   11657 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11658 			*bank = 1;
   11659 			return 0;
   11660 		}
   11661 	}
   11662 
   11663 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11664 		device_xname(sc->sc_dev)));
   11665 	return -1;
   11666 }
   11667 
   11668 /******************************************************************************
   11669  * This function does initial flash setup so that a new read/write/erase cycle
   11670  * can be started.
   11671  *
   11672  * sc - The pointer to the hw structure
   11673  ****************************************************************************/
   11674 static int32_t
   11675 wm_ich8_cycle_init(struct wm_softc *sc)
   11676 {
   11677 	uint16_t hsfsts;
   11678 	int32_t error = 1;
   11679 	int32_t i     = 0;
   11680 
   11681 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11682 
    11683 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   11684 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11685 		return error;
   11686 	}
   11687 
    11688 	/* Clear FCERR and DAEL in HW status by writing 1s */
   11690 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11691 
   11692 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11693 
    11694 	/*
    11695 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11696 	 * check against in order to start a new cycle, or the FDONE bit
    11697 	 * should be changed in the hardware so that it is 1 after hardware
    11698 	 * reset, which could then be used to tell whether a cycle is in
    11699 	 * progress or has completed.  We should also have some software
    11700 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    11701 	 * so that access to those bits by two threads is serialized and
    11702 	 * two threads don't start a cycle at the same time.
    11703 	 */
   11704 
   11705 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11706 		/*
   11707 		 * There is no cycle running at present, so we can start a
   11708 		 * cycle
   11709 		 */
   11710 
   11711 		/* Begin by setting Flash Cycle Done. */
   11712 		hsfsts |= HSFSTS_DONE;
   11713 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11714 		error = 0;
   11715 	} else {
   11716 		/*
    11717 		 * Otherwise poll for some time so the current cycle has a
    11718 		 * chance to end before giving up.
   11719 		 */
   11720 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11721 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11722 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11723 				error = 0;
   11724 				break;
   11725 			}
   11726 			delay(1);
   11727 		}
   11728 		if (error == 0) {
   11729 			/*
    11730 			 * The previous cycle ended within the timeout;
    11731 			 * now set the Flash Cycle Done.
   11732 			 */
   11733 			hsfsts |= HSFSTS_DONE;
   11734 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11735 		}
   11736 	}
   11737 	return error;
   11738 }
   11739 
   11740 /******************************************************************************
   11741  * This function starts a flash cycle and waits for its completion
   11742  *
   11743  * sc - The pointer to the hw structure
   11744  ****************************************************************************/
   11745 static int32_t
   11746 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11747 {
   11748 	uint16_t hsflctl;
   11749 	uint16_t hsfsts;
   11750 	int32_t error = 1;
   11751 	uint32_t i = 0;
   11752 
   11753 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11754 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11755 	hsflctl |= HSFCTL_GO;
   11756 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11757 
   11758 	/* Wait till FDONE bit is set to 1 */
   11759 	do {
   11760 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11761 		if (hsfsts & HSFSTS_DONE)
   11762 			break;
   11763 		delay(1);
   11764 		i++;
   11765 	} while (i < timeout);
    11766 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   11767 		error = 0;
   11768 
   11769 	return error;
   11770 }
   11771 
   11772 /******************************************************************************
   11773  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11774  *
   11775  * sc - The pointer to the hw structure
   11776  * index - The index of the byte or word to read.
   11777  * size - Size of data to read, 1=byte 2=word, 4=dword
   11778  * data - Pointer to the word to store the value read.
   11779  *****************************************************************************/
   11780 static int32_t
   11781 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11782     uint32_t size, uint32_t *data)
   11783 {
   11784 	uint16_t hsfsts;
   11785 	uint16_t hsflctl;
   11786 	uint32_t flash_linear_address;
   11787 	uint32_t flash_data = 0;
   11788 	int32_t error = 1;
   11789 	int32_t count = 0;
   11790 
    11791 	if (size < 1 || size > 4 || data == NULL ||
   11792 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11793 		return error;
   11794 
   11795 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11796 	    sc->sc_ich8_flash_base;
   11797 
   11798 	do {
   11799 		delay(1);
   11800 		/* Steps */
   11801 		error = wm_ich8_cycle_init(sc);
   11802 		if (error)
   11803 			break;
   11804 
   11805 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11806 		/* BCOUNT is size - 1: 0/1/3 select 1-, 2- or 4-byte reads. */
   11807 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11808 		    & HSFCTL_BCOUNT_MASK;
   11809 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11810 		if (sc->sc_type == WM_T_PCH_SPT) {
   11811 			/*
    11812 			 * In SPT, this register is in LAN memory space, not
    11813 			 * flash.  Therefore, only 32 bit access is supported.
   11814 			 */
   11815 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11816 			    (uint32_t)hsflctl);
   11817 		} else
   11818 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11819 
   11820 		/*
   11821 		 * Write the last 24 bits of index into Flash Linear address
   11822 		 * field in Flash Address
   11823 		 */
    11824 		/* TODO: maybe check the index against the size of the flash */
   11825 
   11826 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11827 
   11828 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11829 
    11830 		/*
    11831 		 * Check if FCERR is set to 1.  If so, clear it and retry
    11832 		 * the whole sequence up to ICH_FLASH_CYCLE_REPEAT_COUNT
    11833 		 * times; otherwise read in (shift in) the Flash Data0,
    11834 		 * least significant byte first.
    11835 		 */
   11836 		if (error == 0) {
   11837 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11838 			if (size == 1)
   11839 				*data = (uint8_t)(flash_data & 0x000000FF);
   11840 			else if (size == 2)
   11841 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11842 			else if (size == 4)
   11843 				*data = (uint32_t)flash_data;
   11844 			break;
   11845 		} else {
   11846 			/*
   11847 			 * If we've gotten here, then things are probably
   11848 			 * completely hosed, but if the error condition is
   11849 			 * detected, it won't hurt to give it another try...
   11850 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11851 			 */
   11852 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11853 			if (hsfsts & HSFSTS_ERR) {
   11854 				/* Repeat for some time before giving up. */
   11855 				continue;
   11856 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11857 				break;
   11858 		}
   11859 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11860 
   11861 	return error;
   11862 }
   11863 
   11864 /******************************************************************************
   11865  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11866  *
   11867  * sc - pointer to wm_hw structure
   11868  * index - The index of the byte to read.
   11869  * data - Pointer to a byte to store the value read.
   11870  *****************************************************************************/
   11871 static int32_t
   11872 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11873 {
   11874 	int32_t status;
   11875 	uint32_t word = 0;
   11876 
   11877 	status = wm_read_ich8_data(sc, index, 1, &word);
   11878 	if (status == 0)
   11879 		*data = (uint8_t)word;
   11880 	else
   11881 		*data = 0;
   11882 
   11883 	return status;
   11884 }
   11885 
   11886 /******************************************************************************
   11887  * Reads a word from the NVM using the ICH8 flash access registers.
   11888  *
   11889  * sc - pointer to wm_hw structure
   11890  * index - The starting byte index of the word to read.
   11891  * data - Pointer to a word to store the value read.
   11892  *****************************************************************************/
   11893 static int32_t
   11894 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11895 {
   11896 	int32_t status;
   11897 	uint32_t word = 0;
   11898 
   11899 	status = wm_read_ich8_data(sc, index, 2, &word);
   11900 	if (status == 0)
   11901 		*data = (uint16_t)word;
   11902 	else
   11903 		*data = 0;
   11904 
   11905 	return status;
   11906 }
   11907 
   11908 /******************************************************************************
   11909  * Reads a dword from the NVM using the ICH8 flash access registers.
   11910  *
   11911  * sc - pointer to wm_hw structure
    11912  * index - The starting byte index of the dword to read.
    11913  * data - Pointer to a dword to store the value read.
   11914  *****************************************************************************/
   11915 static int32_t
   11916 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11917 {
   11918 	int32_t status;
   11919 
   11920 	status = wm_read_ich8_data(sc, index, 4, data);
   11921 	return status;
   11922 }
   11923 
   11924 /******************************************************************************
   11925  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11926  * register.
   11927  *
   11928  * sc - Struct containing variables accessed by shared code
   11929  * offset - offset of word in the EEPROM to read
   11930  * data - word read from the EEPROM
   11931  * words - number of words to read
   11932  *****************************************************************************/
   11933 static int
   11934 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11935 {
   11936 	int32_t  error = 0;
   11937 	uint32_t flash_bank = 0;
   11938 	uint32_t act_offset = 0;
   11939 	uint32_t bank_offset = 0;
   11940 	uint16_t word = 0;
   11941 	uint16_t i = 0;
   11942 
   11943 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11944 		device_xname(sc->sc_dev), __func__));
   11945 
   11946 	/*
   11947 	 * We need to know which is the valid flash bank.  In the event
   11948 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11949 	 * managing flash_bank.  So it cannot be trusted and needs
   11950 	 * to be updated with each read.
   11951 	 */
   11952 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11953 	if (error) {
   11954 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11955 			device_xname(sc->sc_dev)));
   11956 		flash_bank = 0;
   11957 	}
   11958 
   11959 	/*
   11960 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11961 	 * size
   11962 	 */
   11963 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11964 
   11965 	error = wm_get_swfwhw_semaphore(sc);
   11966 	if (error) {
   11967 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11968 		    __func__);
   11969 		return error;
   11970 	}
   11971 
   11972 	for (i = 0; i < words; i++) {
   11973 		/* The NVM part needs a byte offset, hence * 2 */
   11974 		act_offset = bank_offset + ((offset + i) * 2);
   11975 		error = wm_read_ich8_word(sc, act_offset, &word);
   11976 		if (error) {
   11977 			aprint_error_dev(sc->sc_dev,
   11978 			    "%s: failed to read NVM\n", __func__);
   11979 			break;
   11980 		}
   11981 		data[i] = word;
   11982 	}
   11983 
   11984 	wm_put_swfwhw_semaphore(sc);
   11985 	return error;
   11986 }
   11987 
   11988 /******************************************************************************
   11989  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11990  * register.
   11991  *
   11992  * sc - Struct containing variables accessed by shared code
   11993  * offset - offset of word in the EEPROM to read
   11994  * data - word read from the EEPROM
   11995  * words - number of words to read
   11996  *****************************************************************************/
   11997 static int
   11998 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11999 {
   12000 	int32_t  error = 0;
   12001 	uint32_t flash_bank = 0;
   12002 	uint32_t act_offset = 0;
   12003 	uint32_t bank_offset = 0;
   12004 	uint32_t dword = 0;
   12005 	uint16_t i = 0;
   12006 
   12007 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12008 		device_xname(sc->sc_dev), __func__));
   12009 
   12010 	/*
   12011 	 * We need to know which is the valid flash bank.  In the event
   12012 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12013 	 * managing flash_bank.  So it cannot be trusted and needs
   12014 	 * to be updated with each read.
   12015 	 */
   12016 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12017 	if (error) {
   12018 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12019 			device_xname(sc->sc_dev)));
   12020 		flash_bank = 0;
   12021 	}
   12022 
   12023 	/*
   12024 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12025 	 * size
   12026 	 */
   12027 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12028 
   12029 	error = wm_get_swfwhw_semaphore(sc);
   12030 	if (error) {
   12031 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   12032 		    __func__);
   12033 		return error;
   12034 	}
   12035 
   12036 	for (i = 0; i < words; i++) {
   12037 		/* The NVM part needs a byte offset, hence * 2 */
   12038 		act_offset = bank_offset + ((offset + i) * 2);
   12039 		/* but we must read dword aligned, so mask ... */
   12040 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12041 		if (error) {
   12042 			aprint_error_dev(sc->sc_dev,
   12043 			    "%s: failed to read NVM\n", __func__);
   12044 			break;
   12045 		}
   12046 		/* ... and pick out low or high word */
   12047 		if ((act_offset & 0x2) == 0)
   12048 			data[i] = (uint16_t)(dword & 0xFFFF);
   12049 		else
   12050 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12051 	}
   12052 
   12053 	wm_put_swfwhw_semaphore(sc);
   12054 	return error;
   12055 }
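
/*
 * Worked example of the dword handling above: reading NVM word 3 from
 * bank 0 gives act_offset = 6; the dword at byte offset 4 is fetched,
 * and since (6 & 0x2) != 0 the high 16 bits are returned.
 */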
   12056 
   12057 /* iNVM */
   12058 
   12059 static int
   12060 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12061 {
   12062 	int32_t  rv = 0;
   12063 	uint32_t invm_dword;
   12064 	uint16_t i;
   12065 	uint8_t record_type, word_address;
   12066 
   12067 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12068 		device_xname(sc->sc_dev), __func__));
   12069 
   12070 	for (i = 0; i < INVM_SIZE; i++) {
   12071 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12072 		/* Get record type */
   12073 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12074 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12075 			break;
   12076 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12077 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12078 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12079 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12080 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12081 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12082 			if (word_address == address) {
   12083 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12084 				rv = 0;
   12085 				break;
   12086 			}
   12087 		}
   12088 	}
   12089 
   12090 	return rv;
   12091 }
   12092 
   12093 static int
   12094 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12095 {
   12096 	int rv = 0;
   12097 	int i;
   12098 
   12099 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12100 		device_xname(sc->sc_dev), __func__));
   12101 
   12102 	for (i = 0; i < words; i++) {
   12103 		switch (offset + i) {
   12104 		case NVM_OFF_MACADDR:
   12105 		case NVM_OFF_MACADDR1:
   12106 		case NVM_OFF_MACADDR2:
   12107 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12108 			if (rv != 0) {
   12109 				data[i] = 0xffff;
   12110 				rv = -1;
   12111 			}
   12112 			break;
    12113 		case NVM_OFF_CFG2:
    12114 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    12115 			if (rv != 0) {
    12116 				data[i] = NVM_INIT_CTRL_2_DEFAULT_I211;
    12117 				rv = 0;
    12118 			}
    12119 			break;
    12120 		case NVM_OFF_CFG4:
    12121 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    12122 			if (rv != 0) {
    12123 				data[i] = NVM_INIT_CTRL_4_DEFAULT_I211;
    12124 				rv = 0;
    12125 			}
    12126 			break;
    12127 		case NVM_OFF_LED_1_CFG:
    12128 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    12129 			if (rv != 0) {
    12130 				data[i] = NVM_LED_1_CFG_DEFAULT_I211;
    12131 				rv = 0;
    12132 			}
    12133 			break;
    12134 		case NVM_OFF_LED_0_2_CFG:
    12135 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    12136 			if (rv != 0) {
    12137 				data[i] = NVM_LED_0_2_CFG_DEFAULT_I211;
    12138 				rv = 0;
    12139 			}
    12140 			break;
    12141 		case NVM_OFF_ID_LED_SETTINGS:
    12142 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    12143 			if (rv != 0) {
    12144 				data[i] = ID_LED_RESERVED_FFFF;
    12145 				rv = 0;
    12146 			}
    12147 			break;
    12148 		default:
    12149 			DPRINTF(WM_DEBUG_NVM,
    12150 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
    12151 			data[i] = NVM_RESERVED_WORD;
    12152 			break;
   12153 		}
   12154 	}
   12155 
   12156 	return rv;
   12157 }
   12158 
    12159 /* Locking, NVM type detection, checksum validation, versioning and reading */
   12160 
   12161 /*
   12162  * wm_nvm_acquire:
   12163  *
   12164  *	Perform the EEPROM handshake required on some chips.
   12165  */
   12166 static int
   12167 wm_nvm_acquire(struct wm_softc *sc)
   12168 {
   12169 	uint32_t reg;
   12170 	int x;
   12171 	int ret = 0;
   12172 
   12173 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12174 		device_xname(sc->sc_dev), __func__));
   12175 
   12176 	if (sc->sc_type >= WM_T_ICH8) {
   12177 		ret = wm_get_nvm_ich8lan(sc);
   12178 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   12179 		ret = wm_get_swfwhw_semaphore(sc);
   12180 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   12181 		/* This will also do wm_get_swsm_semaphore() if needed */
   12182 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   12183 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12184 		ret = wm_get_swsm_semaphore(sc);
   12185 	}
   12186 
   12187 	if (ret) {
   12188 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   12189 			__func__);
   12190 		return 1;
   12191 	}
   12192 
   12193 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12194 		reg = CSR_READ(sc, WMREG_EECD);
   12195 
   12196 		/* Request EEPROM access. */
   12197 		reg |= EECD_EE_REQ;
   12198 		CSR_WRITE(sc, WMREG_EECD, reg);
   12199 
   12200 		/* ..and wait for it to be granted. */
   12201 		for (x = 0; x < 1000; x++) {
   12202 			reg = CSR_READ(sc, WMREG_EECD);
   12203 			if (reg & EECD_EE_GNT)
   12204 				break;
   12205 			delay(5);
   12206 		}
   12207 		if ((reg & EECD_EE_GNT) == 0) {
   12208 			aprint_error_dev(sc->sc_dev,
   12209 			    "could not acquire EEPROM GNT\n");
   12210 			reg &= ~EECD_EE_REQ;
   12211 			CSR_WRITE(sc, WMREG_EECD, reg);
   12212 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12213 				wm_put_swfwhw_semaphore(sc);
   12214 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   12215 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12216 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12217 				wm_put_swsm_semaphore(sc);
   12218 			return 1;
   12219 		}
   12220 	}
   12221 
   12222 	return 0;
   12223 }
   12224 
   12225 /*
   12226  * wm_nvm_release:
   12227  *
   12228  *	Release the EEPROM mutex.
   12229  */
   12230 static void
   12231 wm_nvm_release(struct wm_softc *sc)
   12232 {
   12233 	uint32_t reg;
   12234 
   12235 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12236 		device_xname(sc->sc_dev), __func__));
   12237 
   12238 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12239 		reg = CSR_READ(sc, WMREG_EECD);
   12240 		reg &= ~EECD_EE_REQ;
   12241 		CSR_WRITE(sc, WMREG_EECD, reg);
   12242 	}
   12243 
   12244 	if (sc->sc_type >= WM_T_ICH8) {
   12245 		wm_put_nvm_ich8lan(sc);
   12246 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12247 		wm_put_swfwhw_semaphore(sc);
   12248 	else if (sc->sc_flags & WM_F_LOCK_SWFW)
   12249 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12250 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12251 		wm_put_swsm_semaphore(sc);
   12252 }
   12253 
   12254 static int
   12255 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12256 {
   12257 	uint32_t eecd = 0;
   12258 
   12259 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12260 	    || sc->sc_type == WM_T_82583) {
   12261 		eecd = CSR_READ(sc, WMREG_EECD);
   12262 
   12263 		/* Isolate bits 15 & 16 */
   12264 		eecd = ((eecd >> 15) & 0x03);
   12265 
   12266 		/* If both bits are set, device is Flash type */
   12267 		if (eecd == 0x03)
   12268 			return 0;
   12269 	}
   12270 	return 1;
   12271 }
   12272 
   12273 static int
   12274 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12275 {
   12276 	uint32_t eec;
   12277 
   12278 	eec = CSR_READ(sc, WMREG_EEC);
   12279 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12280 		return 1;
   12281 
   12282 	return 0;
   12283 }
   12284 
   12285 /*
   12286  * wm_nvm_validate_checksum
   12287  *
   12288  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12289  */
   12290 static int
   12291 wm_nvm_validate_checksum(struct wm_softc *sc)
   12292 {
   12293 	uint16_t checksum;
   12294 	uint16_t eeprom_data;
   12295 #ifdef WM_DEBUG
   12296 	uint16_t csum_wordaddr, valid_checksum;
   12297 #endif
   12298 	int i;
   12299 
   12300 	checksum = 0;
   12301 
   12302 	/* Don't check for I211 */
   12303 	if (sc->sc_type == WM_T_I211)
   12304 		return 0;
   12305 
   12306 #ifdef WM_DEBUG
   12307 	if (sc->sc_type == WM_T_PCH_LPT) {
   12308 		csum_wordaddr = NVM_OFF_COMPAT;
   12309 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12310 	} else {
   12311 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12312 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12313 	}
   12314 
   12315 	/* Dump EEPROM image for debug */
   12316 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12317 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12318 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12319 		/* XXX PCH_SPT? */
   12320 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12321 		if ((eeprom_data & valid_checksum) == 0) {
   12322 			DPRINTF(WM_DEBUG_NVM,
   12323 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12324 				device_xname(sc->sc_dev), eeprom_data,
   12325 				    valid_checksum));
   12326 		}
   12327 	}
   12328 
   12329 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12330 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12331 		for (i = 0; i < NVM_SIZE; i++) {
   12332 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12333 				printf("XXXX ");
   12334 			else
   12335 				printf("%04hx ", eeprom_data);
   12336 			if (i % 8 == 7)
   12337 				printf("\n");
   12338 		}
   12339 	}
   12340 
   12341 #endif /* WM_DEBUG */
   12342 
   12343 	for (i = 0; i < NVM_SIZE; i++) {
   12344 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12345 			return 1;
   12346 		checksum += eeprom_data;
   12347 	}
   12348 
   12349 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12350 #ifdef WM_DEBUG
   12351 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12352 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12353 #endif
   12354 	}
   12355 
   12356 	return 0;
   12357 }
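
/*
 * Checksum arithmetic (sketch): the 16-bit sum of words 0x00..NVM_SIZE-1
 * must equal NVM_CHECKSUM (0xBABA on these parts).  An NVM updater makes
 * that hold by storing, in the last covered word (conventionally 0x3f),
 *
 *	word[0x3f] = NVM_CHECKSUM - (sum of words 0x00..0x3e)
 *
 * so that the loop above comes out exact modulo 2^16.
 */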
   12358 
   12359 static void
   12360 wm_nvm_version_invm(struct wm_softc *sc)
   12361 {
   12362 	uint32_t dword;
   12363 
   12364 	/*
    12365 	 * Linux's code to decode the version is very strange, so we don't
    12366 	 * follow that algorithm and just use word 61 as the document says.
   12367 	 * Perhaps it's not perfect though...
   12368 	 *
   12369 	 * Example:
   12370 	 *
   12371 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12372 	 */
   12373 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12374 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12375 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12376 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12377 }
   12378 
   12379 static void
   12380 wm_nvm_version(struct wm_softc *sc)
   12381 {
   12382 	uint16_t major, minor, build, patch;
   12383 	uint16_t uid0, uid1;
   12384 	uint16_t nvm_data;
   12385 	uint16_t off;
   12386 	bool check_version = false;
   12387 	bool check_optionrom = false;
   12388 	bool have_build = false;
   12389 	bool have_uid = true;
   12390 
   12391 	/*
   12392 	 * Version format:
   12393 	 *
   12394 	 * XYYZ
   12395 	 * X0YZ
   12396 	 * X0YY
   12397 	 *
   12398 	 * Example:
   12399 	 *
   12400 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12401 	 *	82571	0x50a6	5.10.6?
   12402 	 *	82572	0x506a	5.6.10?
   12403 	 *	82572EI	0x5069	5.6.9?
   12404 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12405 	 *		0x2013	2.1.3?
    12406 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12407 	 */
   12408 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12409 	switch (sc->sc_type) {
   12410 	case WM_T_82571:
   12411 	case WM_T_82572:
   12412 	case WM_T_82574:
   12413 	case WM_T_82583:
   12414 		check_version = true;
   12415 		check_optionrom = true;
   12416 		have_build = true;
   12417 		break;
   12418 	case WM_T_82575:
   12419 	case WM_T_82576:
   12420 	case WM_T_82580:
   12421 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12422 			check_version = true;
   12423 		break;
   12424 	case WM_T_I211:
   12425 		wm_nvm_version_invm(sc);
   12426 		have_uid = false;
   12427 		goto printver;
   12428 	case WM_T_I210:
   12429 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12430 			wm_nvm_version_invm(sc);
   12431 			have_uid = false;
   12432 			goto printver;
   12433 		}
   12434 		/* FALLTHROUGH */
   12435 	case WM_T_I350:
   12436 	case WM_T_I354:
   12437 		check_version = true;
   12438 		check_optionrom = true;
   12439 		break;
   12440 	default:
   12441 		return;
   12442 	}
   12443 	if (check_version) {
   12444 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12445 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12446 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12447 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12448 			build = nvm_data & NVM_BUILD_MASK;
   12449 			have_build = true;
   12450 		} else
   12451 			minor = nvm_data & 0x00ff;
   12452 
   12453 		/* Decimal */
   12454 		minor = (minor / 16) * 10 + (minor % 16);
   12455 		sc->sc_nvm_ver_major = major;
   12456 		sc->sc_nvm_ver_minor = minor;
   12457 
   12458 printver:
   12459 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12460 		    sc->sc_nvm_ver_minor);
   12461 		if (have_build) {
   12462 			sc->sc_nvm_ver_build = build;
   12463 			aprint_verbose(".%d", build);
   12464 		}
   12465 	}
   12466 	if (check_optionrom) {
   12467 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12468 		/* Option ROM Version */
   12469 		if ((off != 0x0000) && (off != 0xffff)) {
   12470 			off += NVM_COMBO_VER_OFF;
   12471 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12472 			wm_nvm_read(sc, off, 1, &uid0);
   12473 			if ((uid0 != 0) && (uid0 != 0xffff)
   12474 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12475 				/* 16bits */
   12476 				major = uid0 >> 8;
   12477 				build = (uid0 << 8) | (uid1 >> 8);
   12478 				patch = uid1 & 0x00ff;
   12479 				aprint_verbose(", option ROM Version %d.%d.%d",
   12480 				    major, build, patch);
   12481 			}
   12482 		}
   12483 	}
   12484 
   12485 	if (have_uid) {
   12486 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12487 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12488 	}
   12489 }
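
/*
 * Worked decode of the version format above, using the 82571 example
 * 0x50a2 and assuming a 4-bit major, 8-bit minor and 4-bit build split:
 * major = 0x5; the 0x0f00 nibble is non-zero, so minor = 0x0a and
 * build = 0x2; the decimal conversion turns minor 0x0a into 10, giving
 * "5.10.2" as in the table.
 */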
   12490 
   12491 /*
   12492  * wm_nvm_read:
   12493  *
   12494  *	Read data from the serial EEPROM.
   12495  */
   12496 static int
   12497 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12498 {
   12499 	int rv;
   12500 
   12501 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12502 		device_xname(sc->sc_dev), __func__));
   12503 
   12504 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12505 		return 1;
   12506 
   12507 	if (wm_nvm_acquire(sc))
   12508 		return 1;
   12509 
   12510 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12511 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12512 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12513 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12514 	else if (sc->sc_type == WM_T_PCH_SPT)
   12515 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12516 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12517 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12518 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12519 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12520 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12521 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12522 	else
   12523 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12524 
   12525 	wm_nvm_release(sc);
   12526 	return rv;
   12527 }
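
/*
 * Typical caller pattern (sketch): all NVM consumers in this file go
 * through this dispatcher, e.g.
 *
 *	uint16_t word;
 *
 *	if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &word) != 0)
 *		aprint_error_dev(sc->sc_dev, "NVM read failed\n");
 *
 * Acquire/release of the NVM lock is handled internally, so callers
 * must not hold it themselves.
 */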
   12528 
   12529 /*
   12530  * Hardware semaphores.
    12531  * Very complex...
   12532  */
   12533 
   12534 static int
   12535 wm_get_null(struct wm_softc *sc)
   12536 {
   12537 
   12538 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12539 		device_xname(sc->sc_dev), __func__));
   12540 	return 0;
   12541 }
   12542 
   12543 static void
   12544 wm_put_null(struct wm_softc *sc)
   12545 {
   12546 
   12547 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12548 		device_xname(sc->sc_dev), __func__));
   12549 	return;
   12550 }
   12551 
   12552 /*
   12553  * Get hardware semaphore.
   12554  * Same as e1000_get_hw_semaphore_generic()
   12555  */
   12556 static int
   12557 wm_get_swsm_semaphore(struct wm_softc *sc)
   12558 {
   12559 	int32_t timeout;
   12560 	uint32_t swsm;
   12561 
   12562 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12563 		device_xname(sc->sc_dev), __func__));
   12564 	KASSERT(sc->sc_nvm_wordsize > 0);
   12565 
   12566 	/* Get the SW semaphore. */
   12567 	timeout = sc->sc_nvm_wordsize + 1;
   12568 	while (timeout) {
   12569 		swsm = CSR_READ(sc, WMREG_SWSM);
   12570 
   12571 		if ((swsm & SWSM_SMBI) == 0)
   12572 			break;
   12573 
   12574 		delay(50);
   12575 		timeout--;
   12576 	}
   12577 
   12578 	if (timeout == 0) {
   12579 		aprint_error_dev(sc->sc_dev,
   12580 		    "could not acquire SWSM SMBI\n");
   12581 		return 1;
   12582 	}
   12583 
   12584 	/* Get the FW semaphore. */
   12585 	timeout = sc->sc_nvm_wordsize + 1;
   12586 	while (timeout) {
   12587 		swsm = CSR_READ(sc, WMREG_SWSM);
   12588 		swsm |= SWSM_SWESMBI;
   12589 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12590 		/* If we managed to set the bit we got the semaphore. */
   12591 		swsm = CSR_READ(sc, WMREG_SWSM);
   12592 		if (swsm & SWSM_SWESMBI)
   12593 			break;
   12594 
   12595 		delay(50);
   12596 		timeout--;
   12597 	}
   12598 
   12599 	if (timeout == 0) {
   12600 		aprint_error_dev(sc->sc_dev,
   12601 		    "could not acquire SWSM SWESMBI\n");
   12602 		/* Release semaphores */
   12603 		wm_put_swsm_semaphore(sc);
   12604 		return 1;
   12605 	}
   12606 	return 0;
   12607 }
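
/*
 * The handshake above is two-staged (sketch): SMBI arbitrates among
 * software agents, then SWESMBI arbitrates between software and
 * firmware.  A successful acquire leaves both bits set, and
 * wm_put_swsm_semaphore() below clears them together.
 */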
   12608 
   12609 /*
   12610  * Put hardware semaphore.
   12611  * Same as e1000_put_hw_semaphore_generic()
   12612  */
   12613 static void
   12614 wm_put_swsm_semaphore(struct wm_softc *sc)
   12615 {
   12616 	uint32_t swsm;
   12617 
   12618 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12619 		device_xname(sc->sc_dev), __func__));
   12620 
   12621 	swsm = CSR_READ(sc, WMREG_SWSM);
   12622 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12623 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12624 }
   12625 
   12626 /*
   12627  * Get SW/FW semaphore.
   12628  * Same as e1000_acquire_swfw_sync_82575().
   12629  */
   12630 static int
   12631 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12632 {
   12633 	uint32_t swfw_sync;
   12634 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12635 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12636 	int timeout = 200;
   12637 
   12638 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12639 		device_xname(sc->sc_dev), __func__));
   12640 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12641 
   12642 	for (timeout = 0; timeout < 200; timeout++) {
   12643 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12644 			if (wm_get_swsm_semaphore(sc)) {
   12645 				aprint_error_dev(sc->sc_dev,
   12646 				    "%s: failed to get semaphore\n",
   12647 				    __func__);
   12648 				return 1;
   12649 			}
   12650 		}
   12651 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12652 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12653 			swfw_sync |= swmask;
   12654 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12655 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12656 				wm_put_swsm_semaphore(sc);
   12657 			return 0;
   12658 		}
   12659 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12660 			wm_put_swsm_semaphore(sc);
   12661 		delay(5000);
   12662 	}
   12663 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12664 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12665 	return 1;
   12666 }
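
/*
 * Bit layout used above (sketch): each resource owns a software claim
 * bit and a firmware claim bit in SW_FW_SYNC, e.g. for the EEPROM:
 *
 *	swmask = SWFW_EEP_SM << SWFW_SOFT_SHIFT;
 *	fwmask = SWFW_EEP_SM << SWFW_FIRM_SHIFT;
 *
 * The lock is taken only when neither bit is set.
 */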
   12667 
   12668 static void
   12669 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12670 {
   12671 	uint32_t swfw_sync;
   12672 
   12673 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12674 		device_xname(sc->sc_dev), __func__));
   12675 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12676 
   12677 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12678 		while (wm_get_swsm_semaphore(sc) != 0)
   12679 			continue;
   12680 	}
   12681 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12682 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12683 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12684 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12685 		wm_put_swsm_semaphore(sc);
   12686 }
   12687 
   12688 static int
   12689 wm_get_phy_82575(struct wm_softc *sc)
   12690 {
   12691 
   12692 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12693 		device_xname(sc->sc_dev), __func__));
   12694 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12695 }
   12696 
   12697 static void
   12698 wm_put_phy_82575(struct wm_softc *sc)
   12699 {
   12700 
   12701 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12702 		device_xname(sc->sc_dev), __func__));
   12703 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12704 }
   12705 
   12706 static int
   12707 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12708 {
   12709 	uint32_t ext_ctrl;
   12710 	int timeout = 200;
   12711 
   12712 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12713 		device_xname(sc->sc_dev), __func__));
   12714 
   12715 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12716 	for (timeout = 0; timeout < 200; timeout++) {
   12717 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12718 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12719 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12720 
   12721 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12722 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12723 			return 0;
   12724 		delay(5000);
   12725 	}
   12726 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12727 	    device_xname(sc->sc_dev), ext_ctrl);
   12728 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12729 	return 1;
   12730 }
   12731 
   12732 static void
   12733 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12734 {
   12735 	uint32_t ext_ctrl;
   12736 
   12737 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12738 		device_xname(sc->sc_dev), __func__));
   12739 
   12740 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12741 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12742 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12743 
   12744 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12745 }
   12746 
   12747 static int
   12748 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12749 {
   12750 	uint32_t ext_ctrl;
   12751 	int timeout;
   12752 
   12753 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12754 		device_xname(sc->sc_dev), __func__));
   12755 	mutex_enter(sc->sc_ich_phymtx);
   12756 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12757 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12758 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12759 			break;
   12760 		delay(1000);
   12761 	}
   12762 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12763 		printf("%s: SW has already locked the resource\n",
   12764 		    device_xname(sc->sc_dev));
   12765 		goto out;
   12766 	}
   12767 
   12768 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12769 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12770 	for (timeout = 0; timeout < 1000; timeout++) {
   12771 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12772 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12773 			break;
   12774 		delay(1000);
   12775 	}
   12776 	if (timeout >= 1000) {
   12777 		printf("%s: failed to acquire semaphore\n",
   12778 		    device_xname(sc->sc_dev));
   12779 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12780 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12781 		goto out;
   12782 	}
   12783 	return 0;
   12784 
   12785 out:
   12786 	mutex_exit(sc->sc_ich_phymtx);
   12787 	return 1;
   12788 }
   12789 
   12790 static void
   12791 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12792 {
   12793 	uint32_t ext_ctrl;
   12794 
   12795 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12796 		device_xname(sc->sc_dev), __func__));
   12797 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12798 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12799 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12800 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12801 	} else {
   12802 		printf("%s: Semaphore unexpectedly released\n",
   12803 		    device_xname(sc->sc_dev));
   12804 	}
   12805 
   12806 	mutex_exit(sc->sc_ich_phymtx);
   12807 }
   12808 
   12809 static int
   12810 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12811 {
   12812 
   12813 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12814 		device_xname(sc->sc_dev), __func__));
   12815 	mutex_enter(sc->sc_ich_nvmmtx);
   12816 
   12817 	return 0;
   12818 }
   12819 
   12820 static void
   12821 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12822 {
   12823 
   12824 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12825 		device_xname(sc->sc_dev), __func__));
   12826 	mutex_exit(sc->sc_ich_nvmmtx);
   12827 }
   12828 
   12829 static int
   12830 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12831 {
   12832 	int i = 0;
   12833 	uint32_t reg;
   12834 
   12835 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12836 		device_xname(sc->sc_dev), __func__));
   12837 
   12838 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12839 	do {
   12840 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12841 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12842 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12843 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12844 			break;
   12845 		delay(2*1000);
   12846 		i++;
   12847 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12848 
   12849 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12850 		wm_put_hw_semaphore_82573(sc);
   12851 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12852 		    device_xname(sc->sc_dev));
   12853 		return -1;
   12854 	}
   12855 
   12856 	return 0;
   12857 }
   12858 
   12859 static void
   12860 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12861 {
   12862 	uint32_t reg;
   12863 
   12864 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12865 		device_xname(sc->sc_dev), __func__));
   12866 
   12867 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12868 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12869 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12870 }
   12871 
   12872 /*
   12873  * Management mode and power management related subroutines.
   12874  * BMC, AMT, suspend/resume and EEE.
   12875  */
   12876 
   12877 #ifdef WM_WOL
   12878 static int
   12879 wm_check_mng_mode(struct wm_softc *sc)
   12880 {
   12881 	int rv;
   12882 
   12883 	switch (sc->sc_type) {
   12884 	case WM_T_ICH8:
   12885 	case WM_T_ICH9:
   12886 	case WM_T_ICH10:
   12887 	case WM_T_PCH:
   12888 	case WM_T_PCH2:
   12889 	case WM_T_PCH_LPT:
   12890 	case WM_T_PCH_SPT:
   12891 		rv = wm_check_mng_mode_ich8lan(sc);
   12892 		break;
   12893 	case WM_T_82574:
   12894 	case WM_T_82583:
   12895 		rv = wm_check_mng_mode_82574(sc);
   12896 		break;
   12897 	case WM_T_82571:
   12898 	case WM_T_82572:
   12899 	case WM_T_82573:
   12900 	case WM_T_80003:
   12901 		rv = wm_check_mng_mode_generic(sc);
   12902 		break;
   12903 	default:
    12904 		/* nothing to do */
   12905 		rv = 0;
   12906 		break;
   12907 	}
   12908 
   12909 	return rv;
   12910 }
   12911 
   12912 static int
   12913 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12914 {
   12915 	uint32_t fwsm;
   12916 
   12917 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12918 
   12919 	if (((fwsm & FWSM_FW_VALID) != 0)
   12920 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12921 		return 1;
   12922 
   12923 	return 0;
   12924 }
   12925 
   12926 static int
   12927 wm_check_mng_mode_82574(struct wm_softc *sc)
   12928 {
   12929 	uint16_t data;
   12930 
   12931 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12932 
   12933 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12934 		return 1;
   12935 
   12936 	return 0;
   12937 }
   12938 
   12939 static int
   12940 wm_check_mng_mode_generic(struct wm_softc *sc)
   12941 {
   12942 	uint32_t fwsm;
   12943 
   12944 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12945 
   12946 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12947 		return 1;
   12948 
   12949 	return 0;
   12950 }
   12951 #endif /* WM_WOL */
   12952 
   12953 static int
   12954 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12955 {
   12956 	uint32_t manc, fwsm, factps;
   12957 
   12958 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12959 		return 0;
   12960 
   12961 	manc = CSR_READ(sc, WMREG_MANC);
   12962 
   12963 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12964 		device_xname(sc->sc_dev), manc));
   12965 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12966 		return 0;
   12967 
   12968 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12969 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12970 		factps = CSR_READ(sc, WMREG_FACTPS);
   12971 		if (((factps & FACTPS_MNGCG) == 0)
   12972 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12973 			return 1;
   12974 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   12975 		uint16_t data;
   12976 
   12977 		factps = CSR_READ(sc, WMREG_FACTPS);
   12978 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12979 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12980 			device_xname(sc->sc_dev), factps, data));
   12981 		if (((factps & FACTPS_MNGCG) == 0)
   12982 		    && ((data & NVM_CFG2_MNGM_MASK)
   12983 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12984 			return 1;
   12985 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12986 	    && ((manc & MANC_ASF_EN) == 0))
   12987 		return 1;
   12988 
   12989 	return 0;
   12990 }
   12991 
   12992 static bool
   12993 wm_phy_resetisblocked(struct wm_softc *sc)
   12994 {
   12995 	bool blocked = false;
   12996 	uint32_t reg;
   12997 	int i = 0;
   12998 
   12999 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13000 		device_xname(sc->sc_dev), __func__));
   13001 
   13002 	switch (sc->sc_type) {
   13003 	case WM_T_ICH8:
   13004 	case WM_T_ICH9:
   13005 	case WM_T_ICH10:
   13006 	case WM_T_PCH:
   13007 	case WM_T_PCH2:
   13008 	case WM_T_PCH_LPT:
   13009 	case WM_T_PCH_SPT:
   13010 		do {
   13011 			reg = CSR_READ(sc, WMREG_FWSM);
   13012 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13013 				blocked = true;
   13014 				delay(10*1000);
   13015 				continue;
   13016 			}
   13017 			blocked = false;
   13018 		} while (blocked && (i++ < 30));
   13019 		return blocked;
   13020 		break;
   13021 	case WM_T_82571:
   13022 	case WM_T_82572:
   13023 	case WM_T_82573:
   13024 	case WM_T_82574:
   13025 	case WM_T_82583:
   13026 	case WM_T_80003:
   13027 		reg = CSR_READ(sc, WMREG_MANC);
   13028 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13029 			return true;
   13030 		else
   13031 			return false;
   13032 		break;
   13033 	default:
   13034 		/* no problem */
   13035 		break;
   13036 	}
   13037 
   13038 	return false;
   13039 }
   13040 
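          /*
           * Set the DRV_LOAD bit to tell the firmware that the driver has
           * taken control of the device, so that it releases shared
           * resources such as the PHY.
           */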
   13041 static void
   13042 wm_get_hw_control(struct wm_softc *sc)
   13043 {
   13044 	uint32_t reg;
   13045 
   13046 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13047 		device_xname(sc->sc_dev), __func__));
   13048 
   13049 	if (sc->sc_type == WM_T_82573) {
   13050 		reg = CSR_READ(sc, WMREG_SWSM);
   13051 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13052 	} else if (sc->sc_type >= WM_T_82571) {
   13053 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13054 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13055 	}
   13056 }
   13057 
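          /* Clear DRV_LOAD: hand control of the device back to the firmware. */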
   13058 static void
   13059 wm_release_hw_control(struct wm_softc *sc)
   13060 {
   13061 	uint32_t reg;
   13062 
   13063 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13064 		device_xname(sc->sc_dev), __func__));
   13065 
   13066 	if (sc->sc_type == WM_T_82573) {
   13067 		reg = CSR_READ(sc, WMREG_SWSM);
   13068 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13069 	} else if (sc->sc_type >= WM_T_82571) {
   13070 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13071 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13072 	}
   13073 }
   13074 
   13075 static void
   13076 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13077 {
   13078 	uint32_t reg;
   13079 
   13080 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13081 		device_xname(sc->sc_dev), __func__));
   13082 
   13083 	if (sc->sc_type < WM_T_PCH2)
   13084 		return;
   13085 
   13086 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13087 
   13088 	if (gate)
   13089 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13090 	else
   13091 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13092 
   13093 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13094 }
   13095 
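          /*
           * Switch the PHY from SMBus to PCIe (MDIO) access mode: disable
           * ULP, force SMBus only for as long as needed to reach the PHY,
           * toggle LANPHYPC to power-cycle it, and finally reset the PHY
           * unless the firmware blocks the reset.
           */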
   13096 static void
   13097 wm_smbustopci(struct wm_softc *sc)
   13098 {
   13099 	uint32_t fwsm, reg;
   13100 	int rv = 0;
   13101 
   13102 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13103 		device_xname(sc->sc_dev), __func__));
   13104 
   13105 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13106 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13107 
   13108 	/* Disable ULP */
   13109 	wm_ulp_disable(sc);
   13110 
   13111 	/* Acquire PHY semaphore */
   13112 	sc->phy.acquire(sc);
   13113 
   13114 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13115 	switch (sc->sc_type) {
   13116 	case WM_T_PCH_LPT:
   13117 	case WM_T_PCH_SPT:
   13118 		if (wm_phy_is_accessible_pchlan(sc))
   13119 			break;
   13120 
   13121 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13122 		reg |= CTRL_EXT_FORCE_SMBUS;
   13123 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13124 #if 0
   13125 		/* XXX Isn't this required??? */
   13126 		CSR_WRITE_FLUSH(sc);
   13127 #endif
   13128 		delay(50 * 1000);
   13129 		/* FALLTHROUGH */
   13130 	case WM_T_PCH2:
   13131 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13132 			break;
   13133 		/* FALLTHROUGH */
   13134 	case WM_T_PCH:
   13135 		if (sc->sc_type == WM_T_PCH)
   13136 			if ((fwsm & FWSM_FW_VALID) != 0)
   13137 				break;
   13138 
   13139 		if (wm_phy_resetisblocked(sc) == true) {
   13140 			printf("XXX reset is blocked(3)\n");
   13141 			break;
   13142 		}
   13143 
   13144 		wm_toggle_lanphypc_pch_lpt(sc);
   13145 
   13146 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13147 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13148 				break;
   13149 
   13150 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13151 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13152 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13153 
   13154 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13155 				break;
   13156 			rv = -1;
   13157 		}
   13158 		break;
   13159 	default:
   13160 		break;
   13161 	}
   13162 
   13163 	/* Release semaphore */
   13164 	sc->phy.release(sc);
   13165 
   13166 	if (rv == 0) {
   13167 		if (wm_phy_resetisblocked(sc)) {
   13168 			printf("XXX reset is blocked(4)\n");
   13169 			goto out;
   13170 		}
   13171 		wm_reset_phy(sc);
   13172 		if (wm_phy_resetisblocked(sc))
   13173 			printf("XXX reset is blocked(4)\n");
   13174 	}
   13175 
   13176 out:
   13177 	/*
   13178 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13179 	 */
   13180 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13181 		delay(10*1000);
   13182 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13183 	}
   13184 }
   13185 
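          /*
           * Keep manageability working while the interface is up: let the
           * host see ARP (clear MANC_ARP_EN) and, on 82571 and newer,
           * route packets on the management ports 623/624 up to the host
           * via MANC2H.
           */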
   13186 static void
   13187 wm_init_manageability(struct wm_softc *sc)
   13188 {
   13189 
   13190 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13191 		device_xname(sc->sc_dev), __func__));
   13192 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13193 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13194 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13195 
   13196 		/* Disable hardware interception of ARP */
   13197 		manc &= ~MANC_ARP_EN;
   13198 
   13199 		/* Enable receiving management packets to the host */
   13200 		if (sc->sc_type >= WM_T_82571) {
   13201 			manc |= MANC_EN_MNG2HOST;
    13202 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13203 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13204 		}
   13205 
   13206 		CSR_WRITE(sc, WMREG_MANC, manc);
   13207 	}
   13208 }
   13209 
   13210 static void
   13211 wm_release_manageability(struct wm_softc *sc)
   13212 {
   13213 
   13214 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13215 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13216 
   13217 		manc |= MANC_ARP_EN;
   13218 		if (sc->sc_type >= WM_T_82571)
   13219 			manc &= ~MANC_EN_MNG2HOST;
   13220 
   13221 		CSR_WRITE(sc, WMREG_MANC, manc);
   13222 	}
   13223 }
   13224 
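          /*
           * Collect the per-chip wakeup and manageability capabilities
           * (HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES, HAS_MANAGE)
           * into sc_flags.
           */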
   13225 static void
   13226 wm_get_wakeup(struct wm_softc *sc)
   13227 {
   13228 
   13229 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13230 	switch (sc->sc_type) {
   13231 	case WM_T_82573:
   13232 	case WM_T_82583:
   13233 		sc->sc_flags |= WM_F_HAS_AMT;
   13234 		/* FALLTHROUGH */
   13235 	case WM_T_80003:
   13236 	case WM_T_82575:
   13237 	case WM_T_82576:
   13238 	case WM_T_82580:
   13239 	case WM_T_I350:
   13240 	case WM_T_I354:
   13241 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13242 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13243 		/* FALLTHROUGH */
   13244 	case WM_T_82541:
   13245 	case WM_T_82541_2:
   13246 	case WM_T_82547:
   13247 	case WM_T_82547_2:
   13248 	case WM_T_82571:
   13249 	case WM_T_82572:
   13250 	case WM_T_82574:
   13251 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13252 		break;
   13253 	case WM_T_ICH8:
   13254 	case WM_T_ICH9:
   13255 	case WM_T_ICH10:
   13256 	case WM_T_PCH:
   13257 	case WM_T_PCH2:
   13258 	case WM_T_PCH_LPT:
   13259 	case WM_T_PCH_SPT:
   13260 		sc->sc_flags |= WM_F_HAS_AMT;
   13261 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13262 		break;
   13263 	default:
   13264 		break;
   13265 	}
   13266 
   13267 	/* 1: HAS_MANAGE */
   13268 	if (wm_enable_mng_pass_thru(sc) != 0)
   13269 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13270 
   13271 	/*
    13272 	 * Note that the WOL flag is set after the EEPROM settings have
    13273 	 * been reset.
   13274 	 */
   13275 }
   13276 
   13277 /*
   13278  * Unconfigure Ultra Low Power mode.
   13279  * Only for I217 and newer (see below).
   13280  */
   13281 static void
   13282 wm_ulp_disable(struct wm_softc *sc)
   13283 {
   13284 	uint32_t reg;
   13285 	int i = 0;
   13286 
   13287 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13288 		device_xname(sc->sc_dev), __func__));
   13289 	/* Exclude old devices */
   13290 	if ((sc->sc_type < WM_T_PCH_LPT)
   13291 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13292 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13293 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13294 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13295 		return;
   13296 
   13297 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13298 		/* Request ME un-configure ULP mode in the PHY */
   13299 		reg = CSR_READ(sc, WMREG_H2ME);
   13300 		reg &= ~H2ME_ULP;
   13301 		reg |= H2ME_ENFORCE_SETTINGS;
   13302 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13303 
   13304 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13305 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13306 			if (i++ == 30) {
   13307 				printf("%s timed out\n", __func__);
   13308 				return;
   13309 			}
   13310 			delay(10 * 1000);
   13311 		}
   13312 		reg = CSR_READ(sc, WMREG_H2ME);
   13313 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13314 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13315 
   13316 		return;
   13317 	}
   13318 
   13319 	/* Acquire semaphore */
   13320 	sc->phy.acquire(sc);
   13321 
   13322 	/* Toggle LANPHYPC */
   13323 	wm_toggle_lanphypc_pch_lpt(sc);
   13324 
   13325 	/* Unforce SMBus mode in PHY */
   13326 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13327 	if (reg == 0x0000 || reg == 0xffff) {
   13328 		uint32_t reg2;
   13329 
   13330 		printf("%s: Force SMBus first.\n", __func__);
   13331 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13332 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13333 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13334 		delay(50 * 1000);
   13335 
   13336 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13337 	}
   13338 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13339 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13340 
   13341 	/* Unforce SMBus mode in MAC */
   13342 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13343 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13344 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13345 
   13346 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13347 	reg |= HV_PM_CTRL_K1_ENA;
   13348 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13349 
   13350 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13351 	reg &= ~(I218_ULP_CONFIG1_IND
   13352 	    | I218_ULP_CONFIG1_STICKY_ULP
   13353 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13354 	    | I218_ULP_CONFIG1_WOL_HOST
   13355 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13356 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13357 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13358 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13359 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13360 	reg |= I218_ULP_CONFIG1_START;
   13361 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13362 
   13363 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13364 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13365 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13366 
   13367 	/* Release semaphore */
   13368 	sc->phy.release(sc);
   13369 	wm_gmii_reset(sc);
   13370 	delay(50 * 1000);
   13371 }
   13372 
   13373 /* WOL in the newer chipset interfaces (pchlan) */
   13374 static void
   13375 wm_enable_phy_wakeup(struct wm_softc *sc)
   13376 {
   13377 #if 0
   13378 	uint16_t preg;
   13379 
   13380 	/* Copy MAC RARs to PHY RARs */
   13381 
   13382 	/* Copy MAC MTA to PHY MTA */
   13383 
   13384 	/* Configure PHY Rx Control register */
   13385 
   13386 	/* Enable PHY wakeup in MAC register */
   13387 
   13388 	/* Configure and enable PHY wakeup in PHY registers */
   13389 
   13390 	/* Activate PHY wakeup */
   13391 
   13392 	/* XXX */
   13393 #endif
   13394 }
   13395 
   13396 /* Power down workaround on D3 */
   13397 static void
   13398 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13399 {
   13400 	uint32_t reg;
   13401 	int i;
   13402 
   13403 	for (i = 0; i < 2; i++) {
   13404 		/* Disable link */
   13405 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13406 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13407 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13408 
   13409 		/*
   13410 		 * Call gig speed drop workaround on Gig disable before
   13411 		 * accessing any PHY registers
   13412 		 */
   13413 		if (sc->sc_type == WM_T_ICH8)
   13414 			wm_gig_downshift_workaround_ich8lan(sc);
   13415 
   13416 		/* Write VR power-down enable */
   13417 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13418 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13419 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13420 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13421 
   13422 		/* Read it back and test */
   13423 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13424 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13425 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13426 			break;
   13427 
   13428 		/* Issue PHY reset and repeat at most one more time */
   13429 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13430 	}
   13431 }
   13432 
   13433 static void
   13434 wm_enable_wakeup(struct wm_softc *sc)
   13435 {
   13436 	uint32_t reg, pmreg;
   13437 	pcireg_t pmode;
   13438 
   13439 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13440 		device_xname(sc->sc_dev), __func__));
   13441 
   13442 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13443 		&pmreg, NULL) == 0)
   13444 		return;
   13445 
   13446 	/* Advertise the wakeup capability */
   13447 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13448 	    | CTRL_SWDPIN(3));
   13449 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13450 
   13451 	/* ICH workaround */
   13452 	switch (sc->sc_type) {
   13453 	case WM_T_ICH8:
   13454 	case WM_T_ICH9:
   13455 	case WM_T_ICH10:
   13456 	case WM_T_PCH:
   13457 	case WM_T_PCH2:
   13458 	case WM_T_PCH_LPT:
   13459 	case WM_T_PCH_SPT:
   13460 		/* Disable gig during WOL */
   13461 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13462 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13463 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13464 		if (sc->sc_type == WM_T_PCH)
   13465 			wm_gmii_reset(sc);
   13466 
   13467 		/* Power down workaround */
   13468 		if (sc->sc_phytype == WMPHY_82577) {
   13469 			struct mii_softc *child;
   13470 
   13471 			/* Assume that the PHY is copper */
   13472 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13473 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13474 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13475 				    (768 << 5) | 25, 0x0444); /* magic num */
   13476 		}
   13477 		break;
   13478 	default:
   13479 		break;
   13480 	}
   13481 
   13482 	/* Keep the laser running on fiber adapters */
   13483 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13484 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13485 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13486 		reg |= CTRL_EXT_SWDPIN(3);
   13487 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13488 	}
   13489 
   13490 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13491 #if 0	/* for the multicast packet */
   13492 	reg |= WUFC_MC;
   13493 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13494 #endif
   13495 
   13496 	if (sc->sc_type >= WM_T_PCH)
   13497 		wm_enable_phy_wakeup(sc);
   13498 	else {
   13499 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13500 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13501 	}
   13502 
   13503 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13504 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13505 		|| (sc->sc_type == WM_T_PCH2))
   13506 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13507 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13508 
   13509 	/* Request PME */
   13510 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13511 #if 0
   13512 	/* Disable WOL */
   13513 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13514 #else
   13515 	/* For WOL */
   13516 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13517 #endif
   13518 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13519 }
   13520 
    13521 /* LPLU (Low Power Link Up) */
   13522 
   13523 static void
   13524 wm_lplu_d0_disable(struct wm_softc *sc)
   13525 {
   13526 	struct mii_data *mii = &sc->sc_mii;
   13527 	uint32_t reg;
   13528 
   13529 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13530 		device_xname(sc->sc_dev), __func__));
   13531 
   13532 	if (sc->sc_phytype == WMPHY_IFE)
   13533 		return;
   13534 
   13535 	switch (sc->sc_type) {
   13536 	case WM_T_82571:
   13537 	case WM_T_82572:
   13538 	case WM_T_82573:
   13539 	case WM_T_82575:
   13540 	case WM_T_82576:
   13541 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13542 		reg &= ~PMR_D0_LPLU;
   13543 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13544 		break;
   13545 	case WM_T_82580:
   13546 	case WM_T_I350:
   13547 	case WM_T_I210:
   13548 	case WM_T_I211:
   13549 		reg = CSR_READ(sc, WMREG_PHPM);
   13550 		reg &= ~PHPM_D0A_LPLU;
   13551 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13552 		break;
   13553 	case WM_T_82574:
   13554 	case WM_T_82583:
   13555 	case WM_T_ICH8:
   13556 	case WM_T_ICH9:
   13557 	case WM_T_ICH10:
   13558 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13559 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13560 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13561 		CSR_WRITE_FLUSH(sc);
   13562 		break;
   13563 	case WM_T_PCH:
   13564 	case WM_T_PCH2:
   13565 	case WM_T_PCH_LPT:
   13566 	case WM_T_PCH_SPT:
   13567 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13568 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13569 		if (wm_phy_resetisblocked(sc) == false)
   13570 			reg |= HV_OEM_BITS_ANEGNOW;
   13571 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13572 		break;
   13573 	default:
   13574 		break;
   13575 	}
   13576 }
   13577 
    13578 /* EEE (Energy Efficient Ethernet) */
   13579 
   13580 static void
   13581 wm_set_eee_i350(struct wm_softc *sc)
   13582 {
   13583 	uint32_t ipcnfg, eeer;
   13584 
   13585 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13586 	eeer = CSR_READ(sc, WMREG_EEER);
   13587 
   13588 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13589 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13590 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13591 		    | EEER_LPI_FC);
   13592 	} else {
   13593 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13594 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13595 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13596 		    | EEER_LPI_FC);
   13597 	}
   13598 
   13599 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13600 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13601 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13602 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13603 }
   13604 
   13605 /*
   13606  * Workarounds (mainly PHY related).
   13607  * Basically, PHY's workarounds are in the PHY drivers.
   13608  */
   13609 
   13610 /* Work-around for 82566 Kumeran PCS lock loss */
   13611 static void
   13612 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13613 {
   13614 	struct mii_data *mii = &sc->sc_mii;
   13615 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13616 	int i;
   13617 	int reg;
   13618 
   13619 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13620 		device_xname(sc->sc_dev), __func__));
   13621 
   13622 	/* If the link is not up, do nothing */
   13623 	if ((status & STATUS_LU) == 0)
   13624 		return;
   13625 
    13626 	/* Nothing to do if the link speed is other than 1Gbps */
   13627 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   13628 		return;
   13629 
   13630 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13631 	for (i = 0; i < 10; i++) {
   13632 		/* read twice */
   13633 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13634 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13635 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13636 			goto out;	/* GOOD! */
   13637 
   13638 		/* Reset the PHY */
   13639 		wm_reset_phy(sc);
   13640 		delay(5*1000);
   13641 	}
   13642 
   13643 	/* Disable GigE link negotiation */
   13644 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13645 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13646 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13647 
   13648 	/*
   13649 	 * Call gig speed drop workaround on Gig disable before accessing
   13650 	 * any PHY registers.
   13651 	 */
   13652 	wm_gig_downshift_workaround_ich8lan(sc);
   13653 
   13654 out:
   13655 	return;
   13656 }
   13657 
   13658 /* WOL from S5 stops working */
   13659 static void
   13660 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13661 {
   13662 	uint16_t kmrn_reg;
   13663 
   13664 	/* Only for igp3 */
   13665 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13666 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13667 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13668 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13669 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13670 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13671 	}
   13672 }
   13673 
   13674 /*
   13675  * Workaround for pch's PHYs
   13676  * XXX should be moved to new PHY driver?
   13677  */
   13678 static void
   13679 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13680 {
   13681 
   13682 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13683 		device_xname(sc->sc_dev), __func__));
   13684 	KASSERT(sc->sc_type == WM_T_PCH);
   13685 
   13686 	if (sc->sc_phytype == WMPHY_82577)
   13687 		wm_set_mdio_slow_mode_hv(sc);
   13688 
   13689 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13690 
   13691 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   13692 
   13693 	/* 82578 */
   13694 	if (sc->sc_phytype == WMPHY_82578) {
   13695 		struct mii_softc *child;
   13696 
   13697 		/*
   13698 		 * Return registers to default by doing a soft reset then
   13699 		 * writing 0x3140 to the control register
   13700 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13701 		 */
   13702 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13703 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13704 			PHY_RESET(child);
   13705 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13706 			    0x3140);
   13707 		}
   13708 	}
   13709 
   13710 	/* Select page 0 */
   13711 	sc->phy.acquire(sc);
   13712 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13713 	sc->phy.release(sc);
   13714 
   13715 	/*
   13716 	 * Configure the K1 Si workaround during phy reset assuming there is
   13717 	 * link so that it disables K1 if link is in 1Gbps.
   13718 	 */
   13719 	wm_k1_gig_workaround_hv(sc, 1);
   13720 }
   13721 
   13722 static void
   13723 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13724 {
   13725 
   13726 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13727 		device_xname(sc->sc_dev), __func__));
   13728 	KASSERT(sc->sc_type == WM_T_PCH2);
   13729 
   13730 	wm_set_mdio_slow_mode_hv(sc);
   13731 }
   13732 
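          /*
           * K1 is a power-saving state on the MAC/PHY interconnect which
           * must not be used while a 1Gbps link is up, so disable it when
           * 'link' is set and otherwise restore the NVM default.
           */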
   13733 static int
   13734 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13735 {
   13736 	int k1_enable = sc->sc_nvm_k1_enabled;
   13737 
   13738 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13739 		device_xname(sc->sc_dev), __func__));
   13740 
   13741 	if (sc->phy.acquire(sc) != 0)
   13742 		return -1;
   13743 
   13744 	if (link) {
   13745 		k1_enable = 0;
   13746 
   13747 		/* Link stall fix for link up */
   13748 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13749 	} else {
   13750 		/* Link stall fix for link down */
   13751 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13752 	}
   13753 
   13754 	wm_configure_k1_ich8lan(sc, k1_enable);
   13755 	sc->phy.release(sc);
   13756 
   13757 	return 0;
   13758 }
   13759 
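          /* Switch the PHY's MDIO interface into slow mode. */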
   13760 static void
   13761 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13762 {
   13763 	uint32_t reg;
   13764 
   13765 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13766 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13767 	    reg | HV_KMRN_MDIO_SLOW);
   13768 }
   13769 
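          /*
           * Write the K1 setting through the Kumeran interface, then
           * briefly force the MAC speed (CTRL_FRCSPD with SPD_BYPS) so
           * the change takes effect, and restore CTRL/CTRL_EXT.
           */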
   13770 static void
   13771 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13772 {
   13773 	uint32_t ctrl, ctrl_ext, tmp;
   13774 	uint16_t kmrn_reg;
   13775 
   13776 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13777 
   13778 	if (k1_enable)
   13779 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13780 	else
   13781 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13782 
   13783 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13784 
   13785 	delay(20);
   13786 
   13787 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13788 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13789 
   13790 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13791 	tmp |= CTRL_FRCSPD;
   13792 
   13793 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13794 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13795 	CSR_WRITE_FLUSH(sc);
   13796 	delay(20);
   13797 
   13798 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13799 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13800 	CSR_WRITE_FLUSH(sc);
   13801 	delay(20);
   13802 }
   13803 
   13804 /* special case - for 82575 - need to do manual init ... */
   13805 static void
   13806 wm_reset_init_script_82575(struct wm_softc *sc)
   13807 {
   13808 	/*
    13809 	 * Remark: this is untested code - we have no board without EEPROM.
    13810 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   13811 	 */
   13812 
   13813 	/* SerDes configuration via SERDESCTRL */
   13814 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13815 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13816 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13817 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13818 
   13819 	/* CCM configuration via CCMCTL register */
   13820 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13821 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13822 
   13823 	/* PCIe lanes configuration */
   13824 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13825 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13826 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13827 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13828 
   13829 	/* PCIe PLL Configuration */
   13830 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13831 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13832 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13833 }
   13834 
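          /*
           * After reset in SGMII mode, restore the external/shared MDIO
           * configuration in MDICNFG from this port's CFG3 NVM word.
           */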
   13835 static void
   13836 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13837 {
   13838 	uint32_t reg;
   13839 	uint16_t nvmword;
   13840 	int rv;
   13841 
   13842 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13843 		return;
   13844 
   13845 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13846 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13847 	if (rv != 0) {
   13848 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13849 		    __func__);
   13850 		return;
   13851 	}
   13852 
   13853 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13854 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13855 		reg |= MDICNFG_DEST;
   13856 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13857 		reg |= MDICNFG_COM_MDIO;
   13858 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13859 }
   13860 
   13861 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13862 
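          /*
           * Check that the PHY answers on MDIO by reading its ID registers
           * (retrying in slow MDIO mode on pre-LPT chips), and on LPT/SPT
           * unforce SMBus mode once the PHY is accessible and no ME
           * firmware is active.  Called with the PHY semaphore held.
           */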
   13863 static bool
   13864 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13865 {
   13866 	int i;
   13867 	uint32_t reg;
   13868 	uint16_t id1, id2;
   13869 
   13870 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13871 		device_xname(sc->sc_dev), __func__));
   13872 	id1 = id2 = 0xffff;
   13873 	for (i = 0; i < 2; i++) {
   13874 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13875 		if (MII_INVALIDID(id1))
   13876 			continue;
   13877 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13878 		if (MII_INVALIDID(id2))
   13879 			continue;
   13880 		break;
   13881 	}
   13882 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   13883 		goto out;
   13884 	}
   13885 
   13886 	if (sc->sc_type < WM_T_PCH_LPT) {
   13887 		sc->phy.release(sc);
   13888 		wm_set_mdio_slow_mode_hv(sc);
   13889 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13890 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13891 		sc->phy.acquire(sc);
   13892 	}
   13893 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   13894 		printf("XXX return with false\n");
   13895 		return false;
   13896 	}
   13897 out:
   13898 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13899 		/* Only unforce SMBus if ME is not active */
   13900 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13901 			/* Unforce SMBus mode in PHY */
   13902 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13903 			    CV_SMB_CTRL);
   13904 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13905 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13906 			    CV_SMB_CTRL, reg);
   13907 
   13908 			/* Unforce SMBus mode in MAC */
   13909 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13910 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13911 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13912 		}
   13913 	}
   13914 	return true;
   13915 }
   13916 
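          /*
           * Toggle the LANPHYPC pin to force the PHY into a known power
           * state: take software control of the pin (OVERRIDE), drive its
           * value low, then release it and wait for the PHY to come back
           * up.
           */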
   13917 static void
   13918 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13919 {
   13920 	uint32_t reg;
   13921 	int i;
   13922 
   13923 	/* Set PHY Config Counter to 50msec */
   13924 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13925 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13926 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13927 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13928 
   13929 	/* Toggle LANPHYPC */
   13930 	reg = CSR_READ(sc, WMREG_CTRL);
   13931 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13932 	reg &= ~CTRL_LANPHYPC_VALUE;
   13933 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13934 	CSR_WRITE_FLUSH(sc);
   13935 	delay(1000);
   13936 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13937 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13938 	CSR_WRITE_FLUSH(sc);
   13939 
   13940 	if (sc->sc_type < WM_T_PCH_LPT)
   13941 		delay(50 * 1000);
   13942 	else {
   13943 		i = 20;
   13944 
   13945 		do {
   13946 			delay(5 * 1000);
   13947 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13948 		    && i--);
   13949 
   13950 		delay(30 * 1000);
   13951 	}
   13952 }
   13953 
   13954 static int
   13955 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13956 {
   13957 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13958 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13959 	uint32_t rxa;
   13960 	uint16_t scale = 0, lat_enc = 0;
   13961 	int32_t obff_hwm = 0;
   13962 	int64_t lat_ns, value;
   13963 
   13964 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13965 		device_xname(sc->sc_dev), __func__));
   13966 
   13967 	if (link) {
   13968 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13969 		uint32_t status;
   13970 		uint16_t speed;
   13971 		pcireg_t preg;
   13972 
   13973 		status = CSR_READ(sc, WMREG_STATUS);
   13974 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13975 		case STATUS_SPEED_10:
   13976 			speed = 10;
   13977 			break;
   13978 		case STATUS_SPEED_100:
   13979 			speed = 100;
   13980 			break;
   13981 		case STATUS_SPEED_1000:
   13982 			speed = 1000;
   13983 			break;
   13984 		default:
   13985 			device_printf(sc->sc_dev, "Unknown speed "
   13986 			    "(status = %08x)\n", status);
   13987 			return -1;
   13988 		}
   13989 
   13990 		/* Rx Packet Buffer Allocation size (KB) */
   13991 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13992 
   13993 		/*
   13994 		 * Determine the maximum latency tolerated by the device.
   13995 		 *
   13996 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13997 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13998 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13999 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14000 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14001 		 */
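          		/*
          		 * Worked example: for lat_ns = 100000 (100us) the
          		 * encoding loop below divides by 2^5 twice, giving
          		 * value = 98 and scale = 2, i.e. an encoded latency
          		 * of 98 * 2^10 ns ~= 100.4us.
          		 */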
   14002 		lat_ns = ((int64_t)rxa * 1024 -
   14003 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14004 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14005 		if (lat_ns < 0)
   14006 			lat_ns = 0;
   14007 		else
   14008 			lat_ns /= speed;
   14009 		value = lat_ns;
   14010 
   14011 		while (value > LTRV_VALUE) {
    14012 			scale++;
   14013 			value = howmany(value, __BIT(5));
   14014 		}
   14015 		if (scale > LTRV_SCALE_MAX) {
   14016 			printf("%s: Invalid LTR latency scale %d\n",
   14017 			    device_xname(sc->sc_dev), scale);
   14018 			return -1;
   14019 		}
   14020 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14021 
   14022 		/* Determine the maximum latency tolerated by the platform */
   14023 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14024 		    WM_PCI_LTR_CAP_LPT);
   14025 		max_snoop = preg & 0xffff;
   14026 		max_nosnoop = preg >> 16;
   14027 
   14028 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14029 
   14030 		if (lat_enc > max_ltr_enc) {
   14031 			lat_enc = max_ltr_enc;
   14032 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14033 			    * PCI_LTR_SCALETONS(
   14034 				    __SHIFTOUT(lat_enc,
   14035 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14036 		}
   14037 
   14038 		if (lat_ns) {
   14039 			lat_ns *= speed * 1000;
   14040 			lat_ns /= 8;
   14041 			lat_ns /= 1000000000;
   14042 			obff_hwm = (int32_t)(rxa - lat_ns);
   14043 		}
   14044 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   14045 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    14046 			    " (rxa = %d, lat_ns = %d)\n",
   14047 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14048 			return -1;
   14049 		}
   14050 	}
   14051 	/* Snoop and No-Snoop latencies the same */
   14052 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14053 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14054 
   14055 	/* Set OBFF high water mark */
   14056 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14057 	reg |= obff_hwm;
   14058 	CSR_WRITE(sc, WMREG_SVT, reg);
   14059 
   14060 	/* Enable OBFF */
   14061 	reg = CSR_READ(sc, WMREG_SVCR);
   14062 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14063 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14064 
   14065 	return 0;
   14066 }
   14067 
   14068 /*
   14069  * I210 Errata 25 and I211 Errata 10
   14070  * Slow System Clock.
   14071  */
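          /*
           * If the internal PHY's PLL failed to lock because of the slow
           * clock, reset the PHY with a temporary iNVM autoload word and
           * bounce the device through D3hot/D0 (up to WM_MAX_PLL_TRIES
           * times) until the PLL frequency register reads back sane.
           */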
   14072 static void
   14073 wm_pll_workaround_i210(struct wm_softc *sc)
   14074 {
   14075 	uint32_t mdicnfg, wuc;
   14076 	uint32_t reg;
   14077 	pcireg_t pcireg;
   14078 	uint32_t pmreg;
   14079 	uint16_t nvmword, tmp_nvmword;
   14080 	int phyval;
   14081 	bool wa_done = false;
   14082 	int i;
   14083 
   14084 	/* Save WUC and MDICNFG registers */
   14085 	wuc = CSR_READ(sc, WMREG_WUC);
   14086 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14087 
   14088 	reg = mdicnfg & ~MDICNFG_DEST;
   14089 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14090 
   14091 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14092 		nvmword = INVM_DEFAULT_AL;
   14093 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14094 
   14095 	/* Get Power Management cap offset */
   14096 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14097 		&pmreg, NULL) == 0)
   14098 		return;
   14099 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14100 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14101 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14102 
   14103 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14104 			break; /* OK */
   14105 		}
   14106 
   14107 		wa_done = true;
   14108 		/* Directly reset the internal PHY */
   14109 		reg = CSR_READ(sc, WMREG_CTRL);
   14110 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14111 
   14112 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14113 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14114 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14115 
   14116 		CSR_WRITE(sc, WMREG_WUC, 0);
   14117 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14118 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14119 
   14120 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14121 		    pmreg + PCI_PMCSR);
   14122 		pcireg |= PCI_PMCSR_STATE_D3;
   14123 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14124 		    pmreg + PCI_PMCSR, pcireg);
   14125 		delay(1000);
   14126 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14127 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14128 		    pmreg + PCI_PMCSR, pcireg);
   14129 
   14130 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14131 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14132 
   14133 		/* Restore WUC register */
   14134 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14135 	}
   14136 
   14137 	/* Restore MDICNFG setting */
   14138 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14139 	if (wa_done)
   14140 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14141 }
   14142 
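          /*
           * PCH_SPT quirk for legacy (INTx) interrupts: keep the side
           * clock ungated and disable IOSF sideband clock gating and
           * clock requests.
           */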
   14143 static void
   14144 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14145 {
   14146 	uint32_t reg;
   14147 
   14148 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14149 		device_xname(sc->sc_dev), __func__));
   14150 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14151 
   14152 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14153 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14154 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14155 
   14156 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14157 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14158 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14159 }
   14160