/*	$NetBSD: if_wm.c,v 1.490 2017/03/01 08:31:06 knakahara Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.490 2017/03/01 08:31:06 knakahara Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts this device driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
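
/*
 * Example of the ring-index arithmetic above (illustrative only): the
 * descriptor and job counts are powers of two, so "add one, then mask"
 * wraps around the ring without a modulo or a branch.  With
 * WM_NTXDESC(txq) == 4096:
 *
 *	WM_NEXTTX(txq, 100)  == 101
 *	WM_NEXTTX(txq, 4095) == (4096 & 4095) == 0
 *
 * The same scheme is used for the Rx ring (WM_NEXTRX/WM_PREVRX below).
 */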

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
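
/*
 * Sizing note (illustrative, assuming 4 KiB pages): IP_MAXPACKET is
 * 65535, so round_page(IP_MAXPACKET) == 65536 and WM_MAXTXDMA is
 * 128 KiB, i.e. enough DMA map space for a maximal TSO payload plus
 * the replicated headers.
 */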

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
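
/*
 * Illustration (comment only, not compiled): WM_Q_EVCNT_DEFINE(txq, txdw)
 * expands to
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * Note the ## operator is not applied inside a string literal, so the
 * name buffer is sized by the literal "qname##XX##evname" (18 bytes),
 * which is large enough for the "%s%02d%s" name that
 * WM_Q_EVCNT_ATTACH snprintf()s into it, e.g. "txq00txdw".
 */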

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags (not ifp->if_flags)
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
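
/*
 * Note (descriptive only): these wrappers tolerate a NULL sc_core_lock,
 * e.g. before the lock has been allocated during attach.  Locking then
 * degrades to a no-op and WM_CORE_LOCKED() evaluates to true, so
 * KASSERT(WM_CORE_LOCKED(sc))-style checks still pass.
 */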

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
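
/*
 * How the Rx chain macros work (illustrative only): rxq_tailp always
 * points at the m_next slot where the next mbuf should be stored, so
 * appending is O(1) with no list walk.  After
 *
 *	WM_RXCHAIN_RESET(rxq);
 *	WM_RXCHAIN_LINK(rxq, m1);	// rxq_head == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	// m1->m_next == m2
 *
 * rxq_tail == m2 and rxq_tailp == &m2->m_next, ready for the next
 * fragment of a multi-buffer (e.g. jumbo) packet.
 */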

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
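
/*
 * Descriptive note: CSR_WRITE_FLUSH forces posted PCI writes out to the
 * device.  A read of a device register (STATUS here) cannot complete
 * until previously posted writes on the path have been delivered, so
 * this is used where a register write must take effect before a delay
 * or a dependent access.
 */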

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
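
/*
 * Worked example (illustrative only): descriptor rings are addressed by
 * 64-bit DMA addresses split across two 32-bit register fields.  With
 * txq_desc_dma == 0x123456000 and a 16-byte descriptor size,
 * descriptor 4 lives at 0x123456040, so
 *
 *	WM_CDTXADDR_LO(txq, 4) == 0x23456040
 *	WM_CDTXADDR_HI(txq, 4) == 0x1
 *
 * On a 32-bit bus_addr_t the HI half is simply 0.
 */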

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1336 	  "I350 Gigabit Fiber Network Connection",
   1337 	  WM_T_I350,		WMP_F_FIBER },
   1338 
   1339 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1340 	  "I350 Gigabit Backplane Connection",
   1341 	  WM_T_I350,		WMP_F_SERDES },
   1342 
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1344 	  "I350 Quad Port Gigabit Ethernet",
   1345 	  WM_T_I350,		WMP_F_SERDES },
   1346 
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1348 	  "I350 Gigabit Connection",
   1349 	  WM_T_I350,		WMP_F_COPPER },
   1350 
   1351 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1352 	  "I354 Gigabit Ethernet (KX)",
   1353 	  WM_T_I354,		WMP_F_SERDES },
   1354 
   1355 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1356 	  "I354 Gigabit Ethernet (SGMII)",
   1357 	  WM_T_I354,		WMP_F_COPPER },
   1358 
   1359 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1360 	  "I354 Gigabit Ethernet (2.5G)",
   1361 	  WM_T_I354,		WMP_F_COPPER },
   1362 
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1364 	  "I210-T1 Ethernet Server Adapter",
   1365 	  WM_T_I210,		WMP_F_COPPER },
   1366 
   1367 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1368 	  "I210 Ethernet (Copper OEM)",
   1369 	  WM_T_I210,		WMP_F_COPPER },
   1370 
   1371 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1372 	  "I210 Ethernet (Copper IT)",
   1373 	  WM_T_I210,		WMP_F_COPPER },
   1374 
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1376 	  "I210 Ethernet (FLASH less)",
   1377 	  WM_T_I210,		WMP_F_COPPER },
   1378 
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1380 	  "I210 Gigabit Ethernet (Fiber)",
   1381 	  WM_T_I210,		WMP_F_FIBER },
   1382 
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1384 	  "I210 Gigabit Ethernet (SERDES)",
   1385 	  WM_T_I210,		WMP_F_SERDES },
   1386 
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1388 	  "I210 Gigabit Ethernet (FLASH less)",
   1389 	  WM_T_I210,		WMP_F_SERDES },
   1390 
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1392 	  "I210 Gigabit Ethernet (SGMII)",
   1393 	  WM_T_I210,		WMP_F_COPPER },
   1394 
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1396 	  "I211 Ethernet (COPPER)",
   1397 	  WM_T_I211,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1399 	  "I217 V Ethernet Connection",
   1400 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1402 	  "I217 LM Ethernet Connection",
   1403 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1405 	  "I218 V Ethernet Connection",
   1406 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1407 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1408 	  "I218 V Ethernet Connection",
   1409 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1411 	  "I218 V Ethernet Connection",
   1412 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1414 	  "I218 LM Ethernet Connection",
   1415 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1417 	  "I218 LM Ethernet Connection",
   1418 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1420 	  "I218 LM Ethernet Connection",
   1421 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1422 #if 0
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1424 	  "I219 V Ethernet Connection",
   1425 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1427 	  "I219 V Ethernet Connection",
   1428 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1430 	  "I219 V Ethernet Connection",
   1431 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1433 	  "I219 V Ethernet Connection",
   1434 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1436 	  "I219 LM Ethernet Connection",
   1437 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1439 	  "I219 LM Ethernet Connection",
   1440 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1442 	  "I219 LM Ethernet Connection",
   1443 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1445 	  "I219 LM Ethernet Connection",
   1446 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1448 	  "I219 LM Ethernet Connection",
   1449 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1450 #endif
   1451 	{ 0,			0,
   1452 	  NULL,
   1453 	  0,			0 },
   1454 };
   1455 
   1456 /*
   1457  * Register read/write functions.
   1458  * Other than CSR_{READ|WRITE}().
   1459  */
   1460 
   1461 #if 0 /* Not currently used */
   1462 static inline uint32_t
   1463 wm_io_read(struct wm_softc *sc, int reg)
   1464 {
   1465 
   1466 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1467 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1468 }
   1469 #endif
   1470 
   1471 static inline void
   1472 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1473 {
   1474 
   1475 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1476 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1477 }
   1478 
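         /*
          * A descriptive sketch of what the helper below does: it packs an
          * 8-bit value and a register offset into a single SCTL-style write,
          * then polls (up to SCTL_CTL_POLL_TIMEOUT iterations, 5us apart)
          * until the hardware sets SCTL_CTL_READY.
          */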
   1479 static inline void
   1480 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1481     uint32_t data)
   1482 {
   1483 	uint32_t regval;
   1484 	int i;
   1485 
   1486 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1487 
   1488 	CSR_WRITE(sc, reg, regval);
   1489 
   1490 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1491 		delay(5);
   1492 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1493 			break;
   1494 	}
   1495 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1496 		aprint_error("%s: WARNING:"
   1497 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1498 		    device_xname(sc->sc_dev), reg);
   1499 	}
   1500 }
   1501 
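         /*
          * Store a bus address into the two little-endian 32-bit halves of a
          * wiseman_addr_t descriptor field; on platforms where bus_addr_t is
          * only 32 bits wide the high word is always zero.
          */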
   1502 static inline void
   1503 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1504 {
   1505 	wa->wa_low = htole32(v & 0xffffffffU);
   1506 	if (sizeof(bus_addr_t) == 8)
   1507 		wa->wa_high = htole32((uint64_t) v >> 32);
   1508 	else
   1509 		wa->wa_high = 0;
   1510 }
   1511 
   1512 /*
   1513  * Descriptor sync/init functions.
   1514  */
   1515 static inline void
   1516 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1517 {
   1518 	struct wm_softc *sc = txq->txq_sc;
   1519 
   1520 	/* If it will wrap around, sync to the end of the ring. */
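         	/*
         	 * A worked example with hypothetical numbers: on a
         	 * 256-descriptor ring, start == 250 and num == 10 syncs
         	 * descriptors 250-255 here, then descriptors 0-3 below.
         	 */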
   1521 	if ((start + num) > WM_NTXDESC(txq)) {
   1522 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1523 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1524 		    (WM_NTXDESC(txq) - start), ops);
   1525 		num -= (WM_NTXDESC(txq) - start);
   1526 		start = 0;
   1527 	}
   1528 
   1529 	/* Now sync whatever is left. */
   1530 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1531 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1532 }
   1533 
   1534 static inline void
   1535 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1536 {
   1537 	struct wm_softc *sc = rxq->rxq_sc;
   1538 
   1539 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1540 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1541 }
   1542 
   1543 static inline void
   1544 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1545 {
   1546 	struct wm_softc *sc = rxq->rxq_sc;
   1547 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1548 	struct mbuf *m = rxs->rxs_mbuf;
   1549 
   1550 	/*
   1551 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1552 	 * so that the payload after the Ethernet header is aligned
   1553 	 * to a 4-byte boundary.
    1554 	 *
   1555 	 * XXX BRAINDAMAGE ALERT!
   1556 	 * The stupid chip uses the same size for every buffer, which
   1557 	 * is set in the Receive Control register.  We are using the 2K
   1558 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1559 	 * reason, we can't "scoot" packets longer than the standard
   1560 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1561 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1562 	 * the upper layer copy the headers.
   1563 	 */
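         	/*
         	 * For example (hypothetical numbers): with the 2K buffer
         	 * option and align_tweak == 2, a standard 1514-byte frame
         	 * still fits and its IP header lands on a 4-byte boundary;
         	 * a frame larger than (2K - 2) would not fit, so align_tweak
         	 * is set to 0 in that case.
         	 */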
   1564 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1565 
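         	/*
         	 * Three RX descriptor layouts are initialized below: the 82574
         	 * extended format, the "new queue" format for 82575 and later,
         	 * and the legacy wiseman format for everything else.
         	 */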
   1566 	if (sc->sc_type == WM_T_82574) {
   1567 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1568 		rxd->erx_data.erxd_addr =
   1569 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1570 		rxd->erx_data.erxd_dd = 0;
   1571 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1572 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1573 
   1574 		rxd->nqrx_data.nrxd_paddr =
   1575 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1576 		/* Currently, split header is not supported. */
   1577 		rxd->nqrx_data.nrxd_haddr = 0;
   1578 	} else {
   1579 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1580 
   1581 		wm_set_dma_addr(&rxd->wrx_addr,
   1582 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1583 		rxd->wrx_len = 0;
   1584 		rxd->wrx_cksum = 0;
   1585 		rxd->wrx_status = 0;
   1586 		rxd->wrx_errors = 0;
   1587 		rxd->wrx_special = 0;
   1588 	}
   1589 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1590 
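         	/* Advance the RX descriptor tail so the chip can use this one. */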
   1591 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1592 }
   1593 
   1594 /*
   1595  * Device driver interface functions and commonly used functions.
   1596  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1597  */
   1598 
    1599 /* Look up a device in the supported-device table */
   1600 static const struct wm_product *
   1601 wm_lookup(const struct pci_attach_args *pa)
   1602 {
   1603 	const struct wm_product *wmp;
   1604 
   1605 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1606 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1607 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1608 			return wmp;
   1609 	}
   1610 	return NULL;
   1611 }
   1612 
   1613 /* The match function (ca_match) */
   1614 static int
   1615 wm_match(device_t parent, cfdata_t cf, void *aux)
   1616 {
   1617 	struct pci_attach_args *pa = aux;
   1618 
   1619 	if (wm_lookup(pa) != NULL)
   1620 		return 1;
   1621 
   1622 	return 0;
   1623 }
   1624 
   1625 /* The attach function (ca_attach) */
   1626 static void
   1627 wm_attach(device_t parent, device_t self, void *aux)
   1628 {
   1629 	struct wm_softc *sc = device_private(self);
   1630 	struct pci_attach_args *pa = aux;
   1631 	prop_dictionary_t dict;
   1632 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1633 	pci_chipset_tag_t pc = pa->pa_pc;
   1634 	int counts[PCI_INTR_TYPE_SIZE];
   1635 	pci_intr_type_t max_type;
   1636 	const char *eetype, *xname;
   1637 	bus_space_tag_t memt;
   1638 	bus_space_handle_t memh;
   1639 	bus_size_t memsize;
   1640 	int memh_valid;
   1641 	int i, error;
   1642 	const struct wm_product *wmp;
   1643 	prop_data_t ea;
   1644 	prop_number_t pn;
   1645 	uint8_t enaddr[ETHER_ADDR_LEN];
   1646 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1647 	pcireg_t preg, memtype;
   1648 	uint16_t eeprom_data, apme_mask;
   1649 	bool force_clear_smbi;
   1650 	uint32_t link_mode;
   1651 	uint32_t reg;
   1652 
   1653 	sc->sc_dev = self;
   1654 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1655 	sc->sc_core_stopping = false;
   1656 
   1657 	wmp = wm_lookup(pa);
   1658 #ifdef DIAGNOSTIC
   1659 	if (wmp == NULL) {
   1660 		printf("\n");
   1661 		panic("wm_attach: impossible");
   1662 	}
   1663 #endif
   1664 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1665 
   1666 	sc->sc_pc = pa->pa_pc;
   1667 	sc->sc_pcitag = pa->pa_tag;
   1668 
   1669 	if (pci_dma64_available(pa))
   1670 		sc->sc_dmat = pa->pa_dmat64;
   1671 	else
   1672 		sc->sc_dmat = pa->pa_dmat;
   1673 
   1674 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1675 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1676 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1677 
   1678 	sc->sc_type = wmp->wmp_type;
   1679 
   1680 	/* Set default function pointers */
   1681 	sc->phy.acquire = wm_get_null;
   1682 	sc->phy.release = wm_put_null;
   1683 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1684 
   1685 	if (sc->sc_type < WM_T_82543) {
   1686 		if (sc->sc_rev < 2) {
   1687 			aprint_error_dev(sc->sc_dev,
   1688 			    "i82542 must be at least rev. 2\n");
   1689 			return;
   1690 		}
   1691 		if (sc->sc_rev < 3)
   1692 			sc->sc_type = WM_T_82542_2_0;
   1693 	}
   1694 
   1695 	/*
   1696 	 * Disable MSI for Errata:
   1697 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1698 	 *
   1699 	 *  82544: Errata 25
   1700 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1701 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1702 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1703 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1704 	 *
   1705 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1706 	 *
   1707 	 *  82571 & 82572: Errata 63
   1708 	 */
   1709 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1710 	    || (sc->sc_type == WM_T_82572))
   1711 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1712 
   1713 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1714 	    || (sc->sc_type == WM_T_82580)
   1715 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1716 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1717 		sc->sc_flags |= WM_F_NEWQUEUE;
   1718 
   1719 	/* Set device properties (mactype) */
   1720 	dict = device_properties(sc->sc_dev);
   1721 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1722 
   1723 	/*
    1724 	 * Map the device.  All devices support memory-mapped access,
   1725 	 * and it is really required for normal operation.
   1726 	 */
   1727 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1728 	switch (memtype) {
   1729 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1730 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1731 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1732 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1733 		break;
   1734 	default:
   1735 		memh_valid = 0;
   1736 		break;
   1737 	}
   1738 
   1739 	if (memh_valid) {
   1740 		sc->sc_st = memt;
   1741 		sc->sc_sh = memh;
   1742 		sc->sc_ss = memsize;
   1743 	} else {
   1744 		aprint_error_dev(sc->sc_dev,
   1745 		    "unable to map device registers\n");
   1746 		return;
   1747 	}
   1748 
   1749 	/*
   1750 	 * In addition, i82544 and later support I/O mapped indirect
   1751 	 * register access.  It is not desirable (nor supported in
   1752 	 * this driver) to use it for normal operation, though it is
   1753 	 * required to work around bugs in some chip versions.
   1754 	 */
   1755 	if (sc->sc_type >= WM_T_82544) {
   1756 		/* First we have to find the I/O BAR. */
   1757 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1758 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1759 			if (memtype == PCI_MAPREG_TYPE_IO)
   1760 				break;
   1761 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1762 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1763 				i += 4;	/* skip high bits, too */
   1764 		}
   1765 		if (i < PCI_MAPREG_END) {
   1766 			/*
    1767 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1768 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO,
    1769 			 * which is no problem because those newer chips
    1770 			 * don't have this bug.
    1771 			 *
    1772 			 * The i8254x apparently doesn't respond when the
    1773 			 * I/O BAR is 0, which looks as if it hasn't been
    1774 			 * configured.
   1775 			 */
   1776 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1777 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1778 				aprint_error_dev(sc->sc_dev,
   1779 				    "WARNING: I/O BAR at zero.\n");
   1780 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1781 					0, &sc->sc_iot, &sc->sc_ioh,
   1782 					NULL, &sc->sc_ios) == 0) {
   1783 				sc->sc_flags |= WM_F_IOH_VALID;
   1784 			} else {
   1785 				aprint_error_dev(sc->sc_dev,
   1786 				    "WARNING: unable to map I/O space\n");
   1787 			}
   1788 		}
   1789 
   1790 	}
   1791 
   1792 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1793 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1794 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1795 	if (sc->sc_type < WM_T_82542_2_1)
   1796 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1797 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1798 
   1799 	/* power up chip */
   1800 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1801 	    NULL)) && error != EOPNOTSUPP) {
   1802 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1803 		return;
   1804 	}
   1805 
   1806 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1807 
   1808 	/* Allocation settings */
   1809 	max_type = PCI_INTR_TYPE_MSIX;
   1810 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1811 	counts[PCI_INTR_TYPE_MSI] = 1;
   1812 	counts[PCI_INTR_TYPE_INTX] = 1;
   1813 
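         	/*
         	 * Interrupt allocation fallback ladder: try MSI-X first; if
         	 * its setup fails, release it and retry with MSI; if that
         	 * fails too, retry once more with INTx.
         	 */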
   1814 alloc_retry:
   1815 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1816 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1817 		return;
   1818 	}
   1819 
   1820 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1821 		error = wm_setup_msix(sc);
   1822 		if (error) {
   1823 			pci_intr_release(pc, sc->sc_intrs,
   1824 			    counts[PCI_INTR_TYPE_MSIX]);
   1825 
   1826 			/* Setup for MSI: Disable MSI-X */
   1827 			max_type = PCI_INTR_TYPE_MSI;
   1828 			counts[PCI_INTR_TYPE_MSI] = 1;
   1829 			counts[PCI_INTR_TYPE_INTX] = 1;
   1830 			goto alloc_retry;
   1831 		}
    1832 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1833 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1834 		error = wm_setup_legacy(sc);
   1835 		if (error) {
   1836 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1837 			    counts[PCI_INTR_TYPE_MSI]);
   1838 
   1839 			/* The next try is for INTx: Disable MSI */
   1840 			max_type = PCI_INTR_TYPE_INTX;
   1841 			counts[PCI_INTR_TYPE_INTX] = 1;
   1842 			goto alloc_retry;
   1843 		}
   1844 	} else {
   1845 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1846 		error = wm_setup_legacy(sc);
   1847 		if (error) {
   1848 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1849 			    counts[PCI_INTR_TYPE_INTX]);
   1850 			return;
   1851 		}
   1852 	}
   1853 
   1854 	/*
   1855 	 * Check the function ID (unit number of the chip).
   1856 	 */
   1857 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1858 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1859 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1860 	    || (sc->sc_type == WM_T_82580)
   1861 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1862 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1863 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1864 	else
   1865 		sc->sc_funcid = 0;
   1866 
   1867 	/*
   1868 	 * Determine a few things about the bus we're connected to.
   1869 	 */
   1870 	if (sc->sc_type < WM_T_82543) {
   1871 		/* We don't really know the bus characteristics here. */
   1872 		sc->sc_bus_speed = 33;
   1873 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1874 		/*
   1875 		 * CSA (Communication Streaming Architecture) is about as fast
    1876 		 * as a 32-bit 66MHz PCI bus.
   1877 		 */
   1878 		sc->sc_flags |= WM_F_CSA;
   1879 		sc->sc_bus_speed = 66;
   1880 		aprint_verbose_dev(sc->sc_dev,
   1881 		    "Communication Streaming Architecture\n");
   1882 		if (sc->sc_type == WM_T_82547) {
   1883 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1884 			callout_setfunc(&sc->sc_txfifo_ch,
   1885 					wm_82547_txfifo_stall, sc);
   1886 			aprint_verbose_dev(sc->sc_dev,
   1887 			    "using 82547 Tx FIFO stall work-around\n");
   1888 		}
   1889 	} else if (sc->sc_type >= WM_T_82571) {
   1890 		sc->sc_flags |= WM_F_PCIE;
   1891 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1892 		    && (sc->sc_type != WM_T_ICH10)
   1893 		    && (sc->sc_type != WM_T_PCH)
   1894 		    && (sc->sc_type != WM_T_PCH2)
   1895 		    && (sc->sc_type != WM_T_PCH_LPT)
   1896 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1897 			/* ICH* and PCH* have no PCIe capability registers */
   1898 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1899 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1900 				NULL) == 0)
   1901 				aprint_error_dev(sc->sc_dev,
   1902 				    "unable to find PCIe capability\n");
   1903 		}
   1904 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1905 	} else {
   1906 		reg = CSR_READ(sc, WMREG_STATUS);
   1907 		if (reg & STATUS_BUS64)
   1908 			sc->sc_flags |= WM_F_BUS64;
   1909 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1910 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1911 
   1912 			sc->sc_flags |= WM_F_PCIX;
   1913 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1914 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1915 				aprint_error_dev(sc->sc_dev,
   1916 				    "unable to find PCIX capability\n");
   1917 			else if (sc->sc_type != WM_T_82545_3 &&
   1918 				 sc->sc_type != WM_T_82546_3) {
   1919 				/*
   1920 				 * Work around a problem caused by the BIOS
   1921 				 * setting the max memory read byte count
   1922 				 * incorrectly.
   1923 				 */
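         				/*
         				 * Both fields encode the byte count as
         				 * (512 << n), so n = 0 means 512 bytes
         				 * and n = 3 means 4096 bytes.
         				 */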
   1924 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1925 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1926 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1927 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1928 
   1929 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1930 				    PCIX_CMD_BYTECNT_SHIFT;
   1931 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1932 				    PCIX_STATUS_MAXB_SHIFT;
   1933 				if (bytecnt > maxb) {
   1934 					aprint_verbose_dev(sc->sc_dev,
   1935 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1936 					    512 << bytecnt, 512 << maxb);
   1937 					pcix_cmd = (pcix_cmd &
   1938 					    ~PCIX_CMD_BYTECNT_MASK) |
   1939 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1940 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1941 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1942 					    pcix_cmd);
   1943 				}
   1944 			}
   1945 		}
   1946 		/*
   1947 		 * The quad port adapter is special; it has a PCIX-PCIX
   1948 		 * bridge on the board, and can run the secondary bus at
   1949 		 * a higher speed.
   1950 		 */
   1951 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1952 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1953 								      : 66;
   1954 		} else if (sc->sc_flags & WM_F_PCIX) {
   1955 			switch (reg & STATUS_PCIXSPD_MASK) {
   1956 			case STATUS_PCIXSPD_50_66:
   1957 				sc->sc_bus_speed = 66;
   1958 				break;
   1959 			case STATUS_PCIXSPD_66_100:
   1960 				sc->sc_bus_speed = 100;
   1961 				break;
   1962 			case STATUS_PCIXSPD_100_133:
   1963 				sc->sc_bus_speed = 133;
   1964 				break;
   1965 			default:
   1966 				aprint_error_dev(sc->sc_dev,
   1967 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1968 				    reg & STATUS_PCIXSPD_MASK);
   1969 				sc->sc_bus_speed = 66;
   1970 				break;
   1971 			}
   1972 		} else
   1973 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1974 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1975 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1976 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1977 	}
   1978 
   1979 	/* clear interesting stat counters */
   1980 	CSR_READ(sc, WMREG_COLC);
   1981 	CSR_READ(sc, WMREG_RXERRC);
   1982 
   1983 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1984 	    || (sc->sc_type >= WM_T_ICH8))
   1985 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1986 	if (sc->sc_type >= WM_T_ICH8)
   1987 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1988 
   1989 	/* Set PHY, NVM mutex related stuff */
   1990 	switch (sc->sc_type) {
   1991 	case WM_T_82542_2_0:
   1992 	case WM_T_82542_2_1:
   1993 	case WM_T_82543:
   1994 	case WM_T_82544:
   1995 		/* Microwire */
   1996 		sc->sc_nvm_wordsize = 64;
   1997 		sc->sc_nvm_addrbits = 6;
   1998 		break;
   1999 	case WM_T_82540:
   2000 	case WM_T_82545:
   2001 	case WM_T_82545_3:
   2002 	case WM_T_82546:
   2003 	case WM_T_82546_3:
   2004 		/* Microwire */
   2005 		reg = CSR_READ(sc, WMREG_EECD);
   2006 		if (reg & EECD_EE_SIZE) {
   2007 			sc->sc_nvm_wordsize = 256;
   2008 			sc->sc_nvm_addrbits = 8;
   2009 		} else {
   2010 			sc->sc_nvm_wordsize = 64;
   2011 			sc->sc_nvm_addrbits = 6;
   2012 		}
   2013 		sc->sc_flags |= WM_F_LOCK_EECD;
   2014 		break;
   2015 	case WM_T_82541:
   2016 	case WM_T_82541_2:
   2017 	case WM_T_82547:
   2018 	case WM_T_82547_2:
   2019 		sc->sc_flags |= WM_F_LOCK_EECD;
   2020 		reg = CSR_READ(sc, WMREG_EECD);
   2021 		if (reg & EECD_EE_TYPE) {
   2022 			/* SPI */
   2023 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2024 			wm_nvm_set_addrbits_size_eecd(sc);
   2025 		} else {
   2026 			/* Microwire */
   2027 			if ((reg & EECD_EE_ABITS) != 0) {
   2028 				sc->sc_nvm_wordsize = 256;
   2029 				sc->sc_nvm_addrbits = 8;
   2030 			} else {
   2031 				sc->sc_nvm_wordsize = 64;
   2032 				sc->sc_nvm_addrbits = 6;
   2033 			}
   2034 		}
   2035 		break;
   2036 	case WM_T_82571:
   2037 	case WM_T_82572:
   2038 		/* SPI */
   2039 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2040 		wm_nvm_set_addrbits_size_eecd(sc);
   2041 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2042 		sc->phy.acquire = wm_get_swsm_semaphore;
   2043 		sc->phy.release = wm_put_swsm_semaphore;
   2044 		break;
   2045 	case WM_T_82573:
   2046 	case WM_T_82574:
   2047 	case WM_T_82583:
   2048 		if (sc->sc_type == WM_T_82573) {
   2049 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2050 			sc->phy.acquire = wm_get_swsm_semaphore;
   2051 			sc->phy.release = wm_put_swsm_semaphore;
   2052 		} else {
   2053 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2054 			/* Both PHY and NVM use the same semaphore. */
   2055 			sc->phy.acquire
   2056 			    = wm_get_swfwhw_semaphore;
   2057 			sc->phy.release
   2058 			    = wm_put_swfwhw_semaphore;
   2059 		}
   2060 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2061 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2062 			sc->sc_nvm_wordsize = 2048;
   2063 		} else {
   2064 			/* SPI */
   2065 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2066 			wm_nvm_set_addrbits_size_eecd(sc);
   2067 		}
   2068 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2069 		break;
   2070 	case WM_T_82575:
   2071 	case WM_T_82576:
   2072 	case WM_T_82580:
   2073 	case WM_T_I350:
   2074 	case WM_T_I354:
   2075 	case WM_T_80003:
   2076 		/* SPI */
   2077 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2078 		wm_nvm_set_addrbits_size_eecd(sc);
   2079 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2080 		    | WM_F_LOCK_SWSM;
   2081 		sc->phy.acquire = wm_get_phy_82575;
   2082 		sc->phy.release = wm_put_phy_82575;
   2083 		break;
   2084 	case WM_T_ICH8:
   2085 	case WM_T_ICH9:
   2086 	case WM_T_ICH10:
   2087 	case WM_T_PCH:
   2088 	case WM_T_PCH2:
   2089 	case WM_T_PCH_LPT:
   2090 		/* FLASH */
   2091 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2092 		sc->sc_nvm_wordsize = 2048;
   2093 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2094 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2095 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2096 			aprint_error_dev(sc->sc_dev,
   2097 			    "can't map FLASH registers\n");
   2098 			goto out;
   2099 		}
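         		/*
         		 * GFPREG holds the flash base (low bits) and limit
         		 * (bits 16 and up), both in units of
         		 * ICH_FLASH_SECTOR_SIZE.  The computation below turns
         		 * (limit + 1 - base) sectors into bytes and then,
         		 * apparently assuming two NVM banks, into 16-bit words
         		 * per bank.
         		 */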
   2100 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2101 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2102 		    ICH_FLASH_SECTOR_SIZE;
   2103 		sc->sc_ich8_flash_bank_size =
   2104 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2105 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2106 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2107 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2108 		sc->sc_flashreg_offset = 0;
   2109 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2110 		sc->phy.release = wm_put_swflag_ich8lan;
   2111 		break;
   2112 	case WM_T_PCH_SPT:
   2113 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2114 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2115 		sc->sc_flasht = sc->sc_st;
   2116 		sc->sc_flashh = sc->sc_sh;
   2117 		sc->sc_ich8_flash_base = 0;
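         		/*
         		 * Bits 1..5 of WMREG_STRAP encode the NVM size as
         		 * (n + 1) * NVM_SIZE_MULTIPLIER bytes.
         		 */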
   2118 		sc->sc_nvm_wordsize =
   2119 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2120 			* NVM_SIZE_MULTIPLIER;
    2121 		/* That is the size in bytes; we want it in words */
   2122 		sc->sc_nvm_wordsize /= 2;
   2123 		/* assume 2 banks */
   2124 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2125 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2126 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2127 		sc->phy.release = wm_put_swflag_ich8lan;
   2128 		break;
   2129 	case WM_T_I210:
   2130 	case WM_T_I211:
   2131 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2132 			wm_nvm_set_addrbits_size_eecd(sc);
   2133 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2134 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2135 		} else {
   2136 			sc->sc_nvm_wordsize = INVM_SIZE;
   2137 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2138 		}
   2139 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2140 		sc->phy.acquire = wm_get_phy_82575;
   2141 		sc->phy.release = wm_put_phy_82575;
   2142 		break;
   2143 	default:
   2144 		break;
   2145 	}
   2146 
   2147 	/* Reset the chip to a known state. */
   2148 	wm_reset(sc);
   2149 
   2150 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2151 	switch (sc->sc_type) {
   2152 	case WM_T_82571:
   2153 	case WM_T_82572:
   2154 		reg = CSR_READ(sc, WMREG_SWSM2);
   2155 		if ((reg & SWSM2_LOCK) == 0) {
   2156 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2157 			force_clear_smbi = true;
   2158 		} else
   2159 			force_clear_smbi = false;
   2160 		break;
   2161 	case WM_T_82573:
   2162 	case WM_T_82574:
   2163 	case WM_T_82583:
   2164 		force_clear_smbi = true;
   2165 		break;
   2166 	default:
   2167 		force_clear_smbi = false;
   2168 		break;
   2169 	}
   2170 	if (force_clear_smbi) {
   2171 		reg = CSR_READ(sc, WMREG_SWSM);
   2172 		if ((reg & SWSM_SMBI) != 0)
   2173 			aprint_error_dev(sc->sc_dev,
   2174 			    "Please update the Bootagent\n");
   2175 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2176 	}
   2177 
   2178 	/*
    2179 	 * Defer printing the EEPROM type until after verifying the checksum.
   2180 	 * This allows the EEPROM type to be printed correctly in the case
   2181 	 * that no EEPROM is attached.
   2182 	 */
   2183 	/*
   2184 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2185 	 * this for later, so we can fail future reads from the EEPROM.
   2186 	 */
   2187 	if (wm_nvm_validate_checksum(sc)) {
   2188 		/*
    2189 		 * Try again because some PCI-e parts fail the
    2190 		 * first check due to the link being in a sleep state.
   2191 		 */
   2192 		if (wm_nvm_validate_checksum(sc))
   2193 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2194 	}
   2195 
   2196 	/* Set device properties (macflags) */
   2197 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2198 
   2199 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2200 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2201 	else {
   2202 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2203 		    sc->sc_nvm_wordsize);
   2204 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2205 			aprint_verbose("iNVM");
   2206 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2207 			aprint_verbose("FLASH(HW)");
   2208 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2209 			aprint_verbose("FLASH");
   2210 		else {
   2211 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2212 				eetype = "SPI";
   2213 			else
   2214 				eetype = "MicroWire";
   2215 			aprint_verbose("(%d address bits) %s EEPROM",
   2216 			    sc->sc_nvm_addrbits, eetype);
   2217 		}
   2218 	}
   2219 	wm_nvm_version(sc);
   2220 	aprint_verbose("\n");
   2221 
   2222 	/* Check for I21[01] PLL workaround */
   2223 	if (sc->sc_type == WM_T_I210)
   2224 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2225 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2226 		/* NVM image release 3.25 has a workaround */
   2227 		if ((sc->sc_nvm_ver_major < 3)
   2228 		    || ((sc->sc_nvm_ver_major == 3)
   2229 			&& (sc->sc_nvm_ver_minor < 25))) {
   2230 			aprint_verbose_dev(sc->sc_dev,
   2231 			    "ROM image version %d.%d is older than 3.25\n",
   2232 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2233 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2234 		}
   2235 	}
   2236 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2237 		wm_pll_workaround_i210(sc);
   2238 
   2239 	wm_get_wakeup(sc);
   2240 
   2241 	/* Non-AMT based hardware can now take control from firmware */
   2242 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2243 		wm_get_hw_control(sc);
   2244 
   2245 	/*
    2246 	 * Read the Ethernet address from device properties if present;
    2247 	 * otherwise read it from the EEPROM.
   2248 	 */
   2249 	ea = prop_dictionary_get(dict, "mac-address");
   2250 	if (ea != NULL) {
   2251 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2252 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2253 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2254 	} else {
   2255 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2256 			aprint_error_dev(sc->sc_dev,
   2257 			    "unable to read Ethernet address\n");
   2258 			goto out;
   2259 		}
   2260 	}
   2261 
   2262 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2263 	    ether_sprintf(enaddr));
   2264 
   2265 	/*
   2266 	 * Read the config info from the EEPROM, and set up various
   2267 	 * bits in the control registers based on their contents.
   2268 	 */
   2269 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2270 	if (pn != NULL) {
   2271 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2272 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2273 	} else {
   2274 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2275 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2276 			goto out;
   2277 		}
   2278 	}
   2279 
   2280 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2281 	if (pn != NULL) {
   2282 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2283 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2284 	} else {
   2285 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2286 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2287 			goto out;
   2288 		}
   2289 	}
   2290 
   2291 	/* check for WM_F_WOL */
   2292 	switch (sc->sc_type) {
   2293 	case WM_T_82542_2_0:
   2294 	case WM_T_82542_2_1:
   2295 	case WM_T_82543:
   2296 		/* dummy? */
   2297 		eeprom_data = 0;
   2298 		apme_mask = NVM_CFG3_APME;
   2299 		break;
   2300 	case WM_T_82544:
   2301 		apme_mask = NVM_CFG2_82544_APM_EN;
   2302 		eeprom_data = cfg2;
   2303 		break;
   2304 	case WM_T_82546:
   2305 	case WM_T_82546_3:
   2306 	case WM_T_82571:
   2307 	case WM_T_82572:
   2308 	case WM_T_82573:
   2309 	case WM_T_82574:
   2310 	case WM_T_82583:
   2311 	case WM_T_80003:
   2312 	default:
   2313 		apme_mask = NVM_CFG3_APME;
   2314 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2315 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2316 		break;
   2317 	case WM_T_82575:
   2318 	case WM_T_82576:
   2319 	case WM_T_82580:
   2320 	case WM_T_I350:
   2321 	case WM_T_I354: /* XXX ok? */
   2322 	case WM_T_ICH8:
   2323 	case WM_T_ICH9:
   2324 	case WM_T_ICH10:
   2325 	case WM_T_PCH:
   2326 	case WM_T_PCH2:
   2327 	case WM_T_PCH_LPT:
   2328 	case WM_T_PCH_SPT:
   2329 		/* XXX The funcid should be checked on some devices */
   2330 		apme_mask = WUC_APME;
   2331 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2332 		break;
   2333 	}
   2334 
    2335 	/* Check for the WM_F_WOL flag after reading the EEPROM settings */
   2336 	if ((eeprom_data & apme_mask) != 0)
   2337 		sc->sc_flags |= WM_F_WOL;
   2338 #ifdef WM_DEBUG
   2339 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2340 		printf("WOL\n");
   2341 #endif
   2342 
   2343 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2344 		/* Check NVM for autonegotiation */
   2345 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2346 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2347 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2348 		}
   2349 	}
   2350 
   2351 	/*
    2352 	 * XXX need special handling for some multiple-port cards
    2353 	 * to disable a particular port.
   2354 	 */
   2355 
   2356 	if (sc->sc_type >= WM_T_82544) {
   2357 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2358 		if (pn != NULL) {
   2359 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2360 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2361 		} else {
   2362 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2363 				aprint_error_dev(sc->sc_dev,
   2364 				    "unable to read SWDPIN\n");
   2365 				goto out;
   2366 			}
   2367 		}
   2368 	}
   2369 
   2370 	if (cfg1 & NVM_CFG1_ILOS)
   2371 		sc->sc_ctrl |= CTRL_ILOS;
   2372 
   2373 	/*
   2374 	 * XXX
    2375 	 * This code isn't correct because pins 2 and 3 are located in
    2376 	 * different positions on newer chips. Check all the datasheets.
    2377 	 *
    2378 	 * Until this problem is resolved, only do this for chips < 82580.
   2379 	 */
   2380 	if (sc->sc_type <= WM_T_82580) {
   2381 		if (sc->sc_type >= WM_T_82544) {
   2382 			sc->sc_ctrl |=
   2383 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2384 			    CTRL_SWDPIO_SHIFT;
   2385 			sc->sc_ctrl |=
   2386 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2387 			    CTRL_SWDPINS_SHIFT;
   2388 		} else {
   2389 			sc->sc_ctrl |=
   2390 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2391 			    CTRL_SWDPIO_SHIFT;
   2392 		}
   2393 	}
   2394 
   2395 	/* XXX For other than 82580? */
   2396 	if (sc->sc_type == WM_T_82580) {
   2397 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2398 		if (nvmword & __BIT(13))
   2399 			sc->sc_ctrl |= CTRL_ILOS;
   2400 	}
   2401 
   2402 #if 0
   2403 	if (sc->sc_type >= WM_T_82544) {
   2404 		if (cfg1 & NVM_CFG1_IPS0)
   2405 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2406 		if (cfg1 & NVM_CFG1_IPS1)
   2407 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2408 		sc->sc_ctrl_ext |=
   2409 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2410 		    CTRL_EXT_SWDPIO_SHIFT;
   2411 		sc->sc_ctrl_ext |=
   2412 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2413 		    CTRL_EXT_SWDPINS_SHIFT;
   2414 	} else {
   2415 		sc->sc_ctrl_ext |=
   2416 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2417 		    CTRL_EXT_SWDPIO_SHIFT;
   2418 	}
   2419 #endif
   2420 
   2421 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2422 #if 0
   2423 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2424 #endif
   2425 
   2426 	if (sc->sc_type == WM_T_PCH) {
   2427 		uint16_t val;
   2428 
   2429 		/* Save the NVM K1 bit setting */
   2430 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2431 
   2432 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2433 			sc->sc_nvm_k1_enabled = 1;
   2434 		else
   2435 			sc->sc_nvm_k1_enabled = 0;
   2436 	}
   2437 
   2438 	/*
    2439 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize
   2440 	 * media structures accordingly.
   2441 	 */
   2442 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2443 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2444 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2445 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2446 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2447 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2448 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2449 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2450 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2451 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2452 	    || (sc->sc_type == WM_T_I211)) {
   2453 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2454 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
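         		/*
         		 * CTRL_EXT's link-mode field selects among 1000BASE-KX,
         		 * SGMII, external PCIe SERDES, and the default internal
         		 * GMII/copper PHY; for SGMII and SERDES the SFP module
         		 * (if any) may further refine the media type below.
         		 */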
   2455 		switch (link_mode) {
   2456 		case CTRL_EXT_LINK_MODE_1000KX:
   2457 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2458 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2459 			break;
   2460 		case CTRL_EXT_LINK_MODE_SGMII:
   2461 			if (wm_sgmii_uses_mdio(sc)) {
   2462 				aprint_verbose_dev(sc->sc_dev,
   2463 				    "SGMII(MDIO)\n");
   2464 				sc->sc_flags |= WM_F_SGMII;
   2465 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2466 				break;
   2467 			}
   2468 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2469 			/*FALLTHROUGH*/
   2470 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2471 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2472 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2473 				if (link_mode
   2474 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2475 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2476 					sc->sc_flags |= WM_F_SGMII;
   2477 				} else {
   2478 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2479 					aprint_verbose_dev(sc->sc_dev,
   2480 					    "SERDES\n");
   2481 				}
   2482 				break;
   2483 			}
   2484 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2485 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2486 
   2487 			/* Change current link mode setting */
   2488 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2489 			switch (sc->sc_mediatype) {
   2490 			case WM_MEDIATYPE_COPPER:
   2491 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2492 				break;
   2493 			case WM_MEDIATYPE_SERDES:
   2494 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2495 				break;
   2496 			default:
   2497 				break;
   2498 			}
   2499 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2500 			break;
   2501 		case CTRL_EXT_LINK_MODE_GMII:
   2502 		default:
   2503 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2504 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2505 			break;
   2506 		}
   2507 
    2508 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2509 			reg |= CTRL_EXT_I2C_ENA;
    2510 		else
    2511 			reg &= ~CTRL_EXT_I2C_ENA;
   2513 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2514 
   2515 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2516 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2517 		else
   2518 			wm_tbi_mediainit(sc);
   2519 	} else if (sc->sc_type < WM_T_82543 ||
   2520 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2521 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2522 			aprint_error_dev(sc->sc_dev,
   2523 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2524 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2525 		}
   2526 		wm_tbi_mediainit(sc);
   2527 	} else {
   2528 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2529 			aprint_error_dev(sc->sc_dev,
   2530 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2531 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2532 		}
   2533 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2534 	}
   2535 
   2536 	ifp = &sc->sc_ethercom.ec_if;
   2537 	xname = device_xname(sc->sc_dev);
   2538 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2539 	ifp->if_softc = sc;
   2540 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2541 	ifp->if_extflags = IFEF_START_MPSAFE;
   2542 	ifp->if_ioctl = wm_ioctl;
   2543 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2544 		ifp->if_start = wm_nq_start;
   2545 		if (sc->sc_nqueues > 1)
   2546 			ifp->if_transmit = wm_nq_transmit;
   2547 	} else {
   2548 		ifp->if_start = wm_start;
   2549 		if (sc->sc_nqueues > 1)
   2550 			ifp->if_transmit = wm_transmit;
   2551 	}
   2552 	ifp->if_watchdog = wm_watchdog;
   2553 	ifp->if_init = wm_init;
   2554 	ifp->if_stop = wm_stop;
   2555 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2556 	IFQ_SET_READY(&ifp->if_snd);
   2557 
   2558 	/* Check for jumbo frame */
   2559 	switch (sc->sc_type) {
   2560 	case WM_T_82573:
   2561 		/* XXX limited to 9234 if ASPM is disabled */
   2562 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2563 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2564 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2565 		break;
   2566 	case WM_T_82571:
   2567 	case WM_T_82572:
   2568 	case WM_T_82574:
   2569 	case WM_T_82575:
   2570 	case WM_T_82576:
   2571 	case WM_T_82580:
   2572 	case WM_T_I350:
    2573 	case WM_T_I354: /* XXX ok? */
   2574 	case WM_T_I210:
   2575 	case WM_T_I211:
   2576 	case WM_T_80003:
   2577 	case WM_T_ICH9:
   2578 	case WM_T_ICH10:
   2579 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2580 	case WM_T_PCH_LPT:
   2581 	case WM_T_PCH_SPT:
   2582 		/* XXX limited to 9234 */
   2583 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2584 		break;
   2585 	case WM_T_PCH:
   2586 		/* XXX limited to 4096 */
   2587 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2588 		break;
   2589 	case WM_T_82542_2_0:
   2590 	case WM_T_82542_2_1:
   2591 	case WM_T_82583:
   2592 	case WM_T_ICH8:
   2593 		/* No support for jumbo frame */
   2594 		break;
   2595 	default:
   2596 		/* ETHER_MAX_LEN_JUMBO */
   2597 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2598 		break;
   2599 	}
   2600 
    2601 	/* If we're an i82543 or greater, we can support VLANs. */
   2602 	if (sc->sc_type >= WM_T_82543)
   2603 		sc->sc_ethercom.ec_capabilities |=
   2604 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2605 
   2606 	/*
    2607 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
    2608 	 * on the i82543 and later.
   2609 	 */
   2610 	if (sc->sc_type >= WM_T_82543) {
   2611 		ifp->if_capabilities |=
   2612 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2613 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2614 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2615 		    IFCAP_CSUM_TCPv6_Tx |
   2616 		    IFCAP_CSUM_UDPv6_Tx;
   2617 	}
   2618 
   2619 	/*
   2620 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2621 	 *
   2622 	 *	82541GI (8086:1076) ... no
   2623 	 *	82572EI (8086:10b9) ... yes
   2624 	 */
   2625 	if (sc->sc_type >= WM_T_82571) {
   2626 		ifp->if_capabilities |=
   2627 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2628 	}
   2629 
   2630 	/*
    2631 	 * If we're an i82544 or greater (except the i82547), we can do
   2632 	 * TCP segmentation offload.
   2633 	 */
   2634 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2635 		ifp->if_capabilities |= IFCAP_TSOv4;
   2636 	}
   2637 
   2638 	if (sc->sc_type >= WM_T_82571) {
   2639 		ifp->if_capabilities |= IFCAP_TSOv6;
   2640 	}
   2641 
   2642 #ifdef WM_MPSAFE
   2643 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2644 #else
   2645 	sc->sc_core_lock = NULL;
   2646 #endif
   2647 
   2648 	/* Attach the interface. */
   2649 	if_initialize(ifp);
   2650 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2651 	ether_ifattach(ifp, enaddr);
   2652 	if_register(ifp);
   2653 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2654 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2655 			  RND_FLAG_DEFAULT);
   2656 
   2657 #ifdef WM_EVENT_COUNTERS
   2658 	/* Attach event counters. */
   2659 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2660 	    NULL, xname, "linkintr");
   2661 
   2662 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2663 	    NULL, xname, "tx_xoff");
   2664 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2665 	    NULL, xname, "tx_xon");
   2666 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2667 	    NULL, xname, "rx_xoff");
   2668 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2669 	    NULL, xname, "rx_xon");
   2670 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2671 	    NULL, xname, "rx_macctl");
   2672 #endif /* WM_EVENT_COUNTERS */
   2673 
   2674 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2675 		pmf_class_network_register(self, ifp);
   2676 	else
   2677 		aprint_error_dev(self, "couldn't establish power handler\n");
   2678 
   2679 	sc->sc_flags |= WM_F_ATTACHED;
   2680  out:
   2681 	return;
   2682 }
   2683 
   2684 /* The detach function (ca_detach) */
   2685 static int
   2686 wm_detach(device_t self, int flags __unused)
   2687 {
   2688 	struct wm_softc *sc = device_private(self);
   2689 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2690 	int i;
   2691 
   2692 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2693 		return 0;
   2694 
    2695 	/* Stop the interface; callouts are stopped inside wm_stop(). */
   2696 	wm_stop(ifp, 1);
   2697 
   2698 	pmf_device_deregister(self);
   2699 
   2700 #ifdef WM_EVENT_COUNTERS
   2701 	evcnt_detach(&sc->sc_ev_linkintr);
   2702 
   2703 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2704 	evcnt_detach(&sc->sc_ev_tx_xon);
   2705 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2706 	evcnt_detach(&sc->sc_ev_rx_xon);
   2707 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2708 #endif /* WM_EVENT_COUNTERS */
   2709 
   2710 	/* Tell the firmware about the release */
   2711 	WM_CORE_LOCK(sc);
   2712 	wm_release_manageability(sc);
   2713 	wm_release_hw_control(sc);
   2714 	wm_enable_wakeup(sc);
   2715 	WM_CORE_UNLOCK(sc);
   2716 
   2717 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2718 
   2719 	/* Delete all remaining media. */
   2720 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2721 
   2722 	ether_ifdetach(ifp);
   2723 	if_detach(ifp);
   2724 	if_percpuq_destroy(sc->sc_ipq);
   2725 
   2726 	/* Unload RX dmamaps and free mbufs */
   2727 	for (i = 0; i < sc->sc_nqueues; i++) {
   2728 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2729 		mutex_enter(rxq->rxq_lock);
   2730 		wm_rxdrain(rxq);
   2731 		mutex_exit(rxq->rxq_lock);
   2732 	}
   2733 	/* Must unlock here */
   2734 
   2735 	/* Disestablish the interrupt handler */
   2736 	for (i = 0; i < sc->sc_nintrs; i++) {
   2737 		if (sc->sc_ihs[i] != NULL) {
   2738 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2739 			sc->sc_ihs[i] = NULL;
   2740 		}
   2741 	}
   2742 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2743 
   2744 	wm_free_txrx_queues(sc);
   2745 
   2746 	/* Unmap the registers */
   2747 	if (sc->sc_ss) {
   2748 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2749 		sc->sc_ss = 0;
   2750 	}
   2751 	if (sc->sc_ios) {
   2752 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2753 		sc->sc_ios = 0;
   2754 	}
   2755 	if (sc->sc_flashs) {
   2756 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2757 		sc->sc_flashs = 0;
   2758 	}
   2759 
   2760 	if (sc->sc_core_lock)
   2761 		mutex_obj_free(sc->sc_core_lock);
   2762 	if (sc->sc_ich_phymtx)
   2763 		mutex_obj_free(sc->sc_ich_phymtx);
   2764 	if (sc->sc_ich_nvmmtx)
   2765 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2766 
   2767 	return 0;
   2768 }
   2769 
   2770 static bool
   2771 wm_suspend(device_t self, const pmf_qual_t *qual)
   2772 {
   2773 	struct wm_softc *sc = device_private(self);
   2774 
   2775 	wm_release_manageability(sc);
   2776 	wm_release_hw_control(sc);
   2777 	wm_enable_wakeup(sc);
   2778 
   2779 	return true;
   2780 }
   2781 
   2782 static bool
   2783 wm_resume(device_t self, const pmf_qual_t *qual)
   2784 {
   2785 	struct wm_softc *sc = device_private(self);
   2786 
   2787 	wm_init_manageability(sc);
   2788 
   2789 	return true;
   2790 }
   2791 
   2792 /*
   2793  * wm_watchdog:		[ifnet interface function]
   2794  *
   2795  *	Watchdog timer handler.
   2796  */
   2797 static void
   2798 wm_watchdog(struct ifnet *ifp)
   2799 {
   2800 	int qid;
   2801 	struct wm_softc *sc = ifp->if_softc;
   2802 
   2803 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2804 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2805 
   2806 		wm_watchdog_txq(ifp, txq);
   2807 	}
   2808 
   2809 	/* Reset the interface. */
   2810 	(void) wm_init(ifp);
   2811 
    2812 	/*
    2813 	 * Some upper layer processing (e.g. ALTQ) still calls
    2814 	 * ifp->if_start() directly.
    2815 	 */
   2816 	/* Try to get more packets going. */
   2817 	ifp->if_start(ifp);
   2818 }
   2819 
   2820 static void
   2821 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2822 {
   2823 	struct wm_softc *sc = ifp->if_softc;
   2824 
   2825 	/*
   2826 	 * Since we're using delayed interrupts, sweep up
   2827 	 * before we report an error.
   2828 	 */
   2829 	mutex_enter(txq->txq_lock);
   2830 	wm_txeof(sc, txq);
   2831 	mutex_exit(txq->txq_lock);
   2832 
   2833 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2834 #ifdef WM_DEBUG
   2835 		int i, j;
   2836 		struct wm_txsoft *txs;
   2837 #endif
   2838 		log(LOG_ERR,
   2839 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2840 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2841 		    txq->txq_next);
   2842 		ifp->if_oerrors++;
   2843 #ifdef WM_DEBUG
    2844 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   2845 		    i = WM_NEXTTXS(txq, i)) {
   2846 		    txs = &txq->txq_soft[i];
   2847 		    printf("txs %d tx %d -> %d\n",
   2848 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2849 		    for (j = txs->txs_firstdesc; ;
   2850 			j = WM_NEXTTX(txq, j)) {
   2851 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2852 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2853 			printf("\t %#08x%08x\n",
   2854 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2855 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2856 			if (j == txs->txs_lastdesc)
   2857 				break;
   2858 			}
   2859 		}
   2860 #endif
   2861 	}
   2862 }
   2863 
   2864 /*
   2865  * wm_tick:
   2866  *
   2867  *	One second timer, used to check link status, sweep up
   2868  *	completed transmit jobs, etc.
   2869  */
   2870 static void
   2871 wm_tick(void *arg)
   2872 {
   2873 	struct wm_softc *sc = arg;
   2874 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2875 #ifndef WM_MPSAFE
   2876 	int s = splnet();
   2877 #endif
   2878 
   2879 	WM_CORE_LOCK(sc);
   2880 
   2881 	if (sc->sc_core_stopping)
   2882 		goto out;
   2883 
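         	/*
         	 * The statistics registers read below are clear-on-read,
         	 * so sample them once a second and accumulate the values
         	 * into the event counters and interface statistics.
         	 */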
   2884 	if (sc->sc_type >= WM_T_82542_2_1) {
   2885 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2886 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2887 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2888 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2889 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2890 	}
   2891 
   2892 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2893 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2894 	    + CSR_READ(sc, WMREG_CRCERRS)
   2895 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2896 	    + CSR_READ(sc, WMREG_SYMERRC)
   2897 	    + CSR_READ(sc, WMREG_RXERRC)
   2898 	    + CSR_READ(sc, WMREG_SEC)
   2899 	    + CSR_READ(sc, WMREG_CEXTERR)
   2900 	    + CSR_READ(sc, WMREG_RLEC);
    2901 	/*
    2902 	 * WMREG_RNBC is incremented when there are no available buffers
    2903 	 * in host memory. It is not the number of dropped packets,
    2904 	 * because the ethernet controller can still receive packets in
    2905 	 * that case if there is space in the PHY's FIFO.
    2906 	 *
    2907 	 * If you want to track WMREG_RNBC, use your own EVCNT instead
    2908 	 * of if_iqdrops.
    2909 	 */
   2910 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2911 
   2912 	if (sc->sc_flags & WM_F_HAS_MII)
   2913 		mii_tick(&sc->sc_mii);
   2914 	else if ((sc->sc_type >= WM_T_82575)
   2915 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2916 		wm_serdes_tick(sc);
   2917 	else
   2918 		wm_tbi_tick(sc);
   2919 
   2920 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2921 out:
   2922 	WM_CORE_UNLOCK(sc);
   2923 #ifndef WM_MPSAFE
   2924 	splx(s);
   2925 #endif
   2926 }
   2927 
   2928 static int
   2929 wm_ifflags_cb(struct ethercom *ec)
   2930 {
   2931 	struct ifnet *ifp = &ec->ec_if;
   2932 	struct wm_softc *sc = ifp->if_softc;
   2933 	int rc = 0;
   2934 
   2935 	WM_CORE_LOCK(sc);
   2936 
   2937 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2938 	sc->sc_if_flags = ifp->if_flags;
   2939 
   2940 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2941 		rc = ENETRESET;
   2942 		goto out;
   2943 	}
   2944 
   2945 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2946 		wm_set_filter(sc);
   2947 
   2948 	wm_set_vlan(sc);
   2949 
   2950 out:
   2951 	WM_CORE_UNLOCK(sc);
   2952 
   2953 	return rc;
   2954 }
   2955 
   2956 /*
   2957  * wm_ioctl:		[ifnet interface function]
   2958  *
   2959  *	Handle control requests from the operator.
   2960  */
   2961 static int
   2962 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2963 {
   2964 	struct wm_softc *sc = ifp->if_softc;
   2965 	struct ifreq *ifr = (struct ifreq *) data;
   2966 	struct ifaddr *ifa = (struct ifaddr *)data;
   2967 	struct sockaddr_dl *sdl;
   2968 	int s, error;
   2969 
   2970 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2971 		device_xname(sc->sc_dev), __func__));
   2972 
   2973 #ifndef WM_MPSAFE
   2974 	s = splnet();
   2975 #endif
   2976 	switch (cmd) {
   2977 	case SIOCSIFMEDIA:
   2978 	case SIOCGIFMEDIA:
   2979 		WM_CORE_LOCK(sc);
   2980 		/* Flow control requires full-duplex mode. */
   2981 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2982 		    (ifr->ifr_media & IFM_FDX) == 0)
   2983 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2984 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2985 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2986 				/* We can do both TXPAUSE and RXPAUSE. */
   2987 				ifr->ifr_media |=
   2988 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2989 			}
   2990 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2991 		}
   2992 		WM_CORE_UNLOCK(sc);
   2993 #ifdef WM_MPSAFE
   2994 		s = splnet();
   2995 #endif
   2996 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2997 #ifdef WM_MPSAFE
   2998 		splx(s);
   2999 #endif
   3000 		break;
   3001 	case SIOCINITIFADDR:
   3002 		WM_CORE_LOCK(sc);
   3003 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3004 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3005 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3006 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3007 			/* unicast address is first multicast entry */
   3008 			wm_set_filter(sc);
   3009 			error = 0;
   3010 			WM_CORE_UNLOCK(sc);
   3011 			break;
   3012 		}
   3013 		WM_CORE_UNLOCK(sc);
   3014 		/*FALLTHROUGH*/
   3015 	default:
   3016 #ifdef WM_MPSAFE
   3017 		s = splnet();
   3018 #endif
    3019 		/* ether_ioctl() may call wm_start(), so call it unlocked */
   3020 		error = ether_ioctl(ifp, cmd, data);
   3021 #ifdef WM_MPSAFE
   3022 		splx(s);
   3023 #endif
   3024 		if (error != ENETRESET)
   3025 			break;
   3026 
   3027 		error = 0;
   3028 
   3029 		if (cmd == SIOCSIFCAP) {
   3030 			error = (*ifp->if_init)(ifp);
   3031 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3032 			;
   3033 		else if (ifp->if_flags & IFF_RUNNING) {
   3034 			/*
   3035 			 * Multicast list has changed; set the hardware filter
   3036 			 * accordingly.
   3037 			 */
   3038 			WM_CORE_LOCK(sc);
   3039 			wm_set_filter(sc);
   3040 			WM_CORE_UNLOCK(sc);
   3041 		}
   3042 		break;
   3043 	}
   3044 
   3045 #ifndef WM_MPSAFE
   3046 	splx(s);
   3047 #endif
   3048 	return error;
   3049 }
   3050 
   3051 /* MAC address related */
   3052 
   3053 /*
    3054  * Get the offset of the MAC address and return it.
    3055  * On error, return 0; the caller then uses the default offset.
   3056  */
   3057 static uint16_t
   3058 wm_check_alt_mac_addr(struct wm_softc *sc)
   3059 {
   3060 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3061 	uint16_t offset = NVM_OFF_MACADDR;
   3062 
   3063 	/* Try to read alternative MAC address pointer */
   3064 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3065 		return 0;
   3066 
    3067 	/* Check whether the pointer is valid. */
   3068 	if ((offset == 0x0000) || (offset == 0xffff))
   3069 		return 0;
   3070 
   3071 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3072 	/*
    3073 	 * Check whether the alternative MAC address is valid. Some cards
    3074 	 * have a non-0xffff pointer but don't actually use an alternative
    3075 	 * MAC address.
    3076 	 *
    3077 	 * A valid address must not have the multicast (LSB) bit set.
   3078 	 */
   3079 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3080 		if (((myea[0] & 0xff) & 0x01) == 0)
   3081 			return offset; /* Found */
   3082 
   3083 	/* Not found */
   3084 	return 0;
   3085 }
   3086 
   3087 static int
   3088 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3089 {
   3090 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3091 	uint16_t offset = NVM_OFF_MACADDR;
   3092 	int do_invert = 0;
   3093 
   3094 	switch (sc->sc_type) {
   3095 	case WM_T_82580:
   3096 	case WM_T_I350:
   3097 	case WM_T_I354:
   3098 		/* EEPROM Top Level Partitioning */
   3099 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3100 		break;
   3101 	case WM_T_82571:
   3102 	case WM_T_82575:
   3103 	case WM_T_82576:
   3104 	case WM_T_80003:
   3105 	case WM_T_I210:
   3106 	case WM_T_I211:
   3107 		offset = wm_check_alt_mac_addr(sc);
   3108 		if (offset == 0)
   3109 			if ((sc->sc_funcid & 0x01) == 1)
   3110 				do_invert = 1;
   3111 		break;
   3112 	default:
   3113 		if ((sc->sc_funcid & 0x01) == 1)
   3114 			do_invert = 1;
   3115 		break;
   3116 	}
   3117 
   3118 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3119 		goto bad;
   3120 
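         	/*
         	 * The NVM stores the MAC address as three little-endian
         	 * 16-bit words; unpack them into the six address octets.
         	 */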
   3121 	enaddr[0] = myea[0] & 0xff;
   3122 	enaddr[1] = myea[0] >> 8;
   3123 	enaddr[2] = myea[1] & 0xff;
   3124 	enaddr[3] = myea[1] >> 8;
   3125 	enaddr[4] = myea[2] & 0xff;
   3126 	enaddr[5] = myea[2] >> 8;
   3127 
   3128 	/*
   3129 	 * Toggle the LSB of the MAC address on the second port
   3130 	 * of some dual port cards.
   3131 	 */
   3132 	if (do_invert != 0)
   3133 		enaddr[5] ^= 1;
   3134 
   3135 	return 0;
   3136 
   3137  bad:
   3138 	return -1;
   3139 }
   3140 
   3141 /*
   3142  * wm_set_ral:
   3143  *
    3144  *	Set an entry in the receive address list.
   3145  */
   3146 static void
   3147 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3148 {
   3149 	uint32_t ral_lo, ral_hi;
   3150 
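         	/*
         	 * Pack the station address into the two RAL registers. An
         	 * illustrative example: 00:11:22:33:44:55 becomes ral_lo
         	 * 0x33221100 and ral_hi 0x5544 plus the Address Valid bit.
         	 */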
   3151 	if (enaddr != NULL) {
   3152 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3153 		    (enaddr[3] << 24);
   3154 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3155 		ral_hi |= RAL_AV;
   3156 	} else {
   3157 		ral_lo = 0;
   3158 		ral_hi = 0;
   3159 	}
   3160 
   3161 	if (sc->sc_type >= WM_T_82544) {
   3162 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3163 		    ral_lo);
   3164 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3165 		    ral_hi);
   3166 	} else {
   3167 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3168 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3169 	}
   3170 }
   3171 
   3172 /*
   3173  * wm_mchash:
   3174  *
    3175  *	Compute the hash of the multicast address for the 4096-bit
    3176  *	multicast filter (1024-bit on ICH and PCH variants).
   3177  */
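         /*
          * Illustrative example: for the multicast address 01:00:5e:00:00:01
          * with sc_mchash_type == 0 on a non-ICH chip, the hash is
          * (0x00 >> 4) | (0x01 << 4) = 0x010, so wm_set_filter() below sets
          * bit 16 (0x010 & 0x1f) of MTA register 0 (0x010 >> 5).
          */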
   3178 static uint32_t
   3179 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3180 {
   3181 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3182 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3183 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3184 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3185 	uint32_t hash;
   3186 
   3187 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3188 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3189 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3190 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3191 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3192 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3193 		return (hash & 0x3ff);
   3194 	}
   3195 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3196 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3197 
   3198 	return (hash & 0xfff);
   3199 }
   3200 
   3201 /*
   3202  * wm_set_filter:
   3203  *
   3204  *	Set up the receive filter.
   3205  */
   3206 static void
   3207 wm_set_filter(struct wm_softc *sc)
   3208 {
   3209 	struct ethercom *ec = &sc->sc_ethercom;
   3210 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3211 	struct ether_multi *enm;
   3212 	struct ether_multistep step;
   3213 	bus_addr_t mta_reg;
   3214 	uint32_t hash, reg, bit;
   3215 	int i, size, ralmax;
   3216 
   3217 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3218 		device_xname(sc->sc_dev), __func__));
   3219 
   3220 	if (sc->sc_type >= WM_T_82544)
   3221 		mta_reg = WMREG_CORDOVA_MTA;
   3222 	else
   3223 		mta_reg = WMREG_MTA;
   3224 
   3225 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3226 
   3227 	if (ifp->if_flags & IFF_BROADCAST)
   3228 		sc->sc_rctl |= RCTL_BAM;
   3229 	if (ifp->if_flags & IFF_PROMISC) {
   3230 		sc->sc_rctl |= RCTL_UPE;
   3231 		goto allmulti;
   3232 	}
   3233 
   3234 	/*
   3235 	 * Set the station address in the first RAL slot, and
   3236 	 * clear the remaining slots.
   3237 	 */
   3238 	if (sc->sc_type == WM_T_ICH8)
    3239 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3240 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3241 	    || (sc->sc_type == WM_T_PCH))
   3242 		size = WM_RAL_TABSIZE_ICH8;
   3243 	else if (sc->sc_type == WM_T_PCH2)
   3244 		size = WM_RAL_TABSIZE_PCH2;
   3245 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3246 		size = WM_RAL_TABSIZE_PCH_LPT;
   3247 	else if (sc->sc_type == WM_T_82575)
   3248 		size = WM_RAL_TABSIZE_82575;
   3249 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3250 		size = WM_RAL_TABSIZE_82576;
   3251 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3252 		size = WM_RAL_TABSIZE_I350;
   3253 	else
   3254 		size = WM_RAL_TABSIZE;
   3255 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3256 
   3257 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3258 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3259 		switch (i) {
   3260 		case 0:
   3261 			/* We can use all entries */
   3262 			ralmax = size;
   3263 			break;
   3264 		case 1:
   3265 			/* Only RAR[0] */
   3266 			ralmax = 1;
   3267 			break;
   3268 		default:
   3269 			/* available SHRA + RAR[0] */
   3270 			ralmax = i + 1;
   3271 		}
   3272 	} else
   3273 		ralmax = size;
   3274 	for (i = 1; i < size; i++) {
   3275 		if (i < ralmax)
   3276 			wm_set_ral(sc, NULL, i);
   3277 	}
   3278 
   3279 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3280 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3281 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3282 	    || (sc->sc_type == WM_T_PCH_SPT))
   3283 		size = WM_ICH8_MC_TABSIZE;
   3284 	else
   3285 		size = WM_MC_TABSIZE;
   3286 	/* Clear out the multicast table. */
   3287 	for (i = 0; i < size; i++)
   3288 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3289 
   3290 	ETHER_LOCK(ec);
   3291 	ETHER_FIRST_MULTI(step, ec, enm);
   3292 	while (enm != NULL) {
   3293 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3294 			ETHER_UNLOCK(ec);
   3295 			/*
   3296 			 * We must listen to a range of multicast addresses.
   3297 			 * For now, just accept all multicasts, rather than
   3298 			 * trying to set only those filter bits needed to match
   3299 			 * the range.  (At this time, the only use of address
   3300 			 * ranges is for IP multicast routing, for which the
   3301 			 * range is big enough to require all bits set.)
   3302 			 */
   3303 			goto allmulti;
   3304 		}
   3305 
   3306 		hash = wm_mchash(sc, enm->enm_addrlo);
   3307 
   3308 		reg = (hash >> 5);
   3309 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3310 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3311 		    || (sc->sc_type == WM_T_PCH2)
   3312 		    || (sc->sc_type == WM_T_PCH_LPT)
   3313 		    || (sc->sc_type == WM_T_PCH_SPT))
   3314 			reg &= 0x1f;
   3315 		else
   3316 			reg &= 0x7f;
   3317 		bit = hash & 0x1f;
   3318 
   3319 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3320 		hash |= 1U << bit;
   3321 
   3322 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3323 			/*
    3324 			 * 82544 Errata 9: Certain registers cannot be written
   3325 			 * with particular alignments in PCI-X bus operation
   3326 			 * (FCAH, MTA and VFTA).
   3327 			 */
   3328 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3329 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3330 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3331 		} else
   3332 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3333 
   3334 		ETHER_NEXT_MULTI(step, enm);
   3335 	}
   3336 	ETHER_UNLOCK(ec);
   3337 
   3338 	ifp->if_flags &= ~IFF_ALLMULTI;
   3339 	goto setit;
   3340 
   3341  allmulti:
   3342 	ifp->if_flags |= IFF_ALLMULTI;
   3343 	sc->sc_rctl |= RCTL_MPE;
   3344 
   3345  setit:
   3346 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3347 }
   3348 
   3349 /* Reset and init related */
   3350 
   3351 static void
   3352 wm_set_vlan(struct wm_softc *sc)
   3353 {
   3354 
   3355 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3356 		device_xname(sc->sc_dev), __func__));
   3357 
   3358 	/* Deal with VLAN enables. */
   3359 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3360 		sc->sc_ctrl |= CTRL_VME;
   3361 	else
   3362 		sc->sc_ctrl &= ~CTRL_VME;
   3363 
   3364 	/* Write the control registers. */
   3365 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3366 }
   3367 
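         /*
          * wm_set_pcie_completion_timeout:
          *
          *	Set the PCIe completion timeout if it still has its default
          *	value of zero: directly in GCR on pre-version-2 capability
          *	devices, via the PCIe DCSR2 config register otherwise.  The
          *	completion timeout resend bit is cleared in either case.
          */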
   3368 static void
   3369 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3370 {
   3371 	uint32_t gcr;
   3372 	pcireg_t ctrl2;
   3373 
   3374 	gcr = CSR_READ(sc, WMREG_GCR);
   3375 
   3376 	/* Only take action if timeout value is defaulted to 0 */
   3377 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3378 		goto out;
   3379 
   3380 	if ((gcr & GCR_CAP_VER2) == 0) {
   3381 		gcr |= GCR_CMPL_TMOUT_10MS;
   3382 		goto out;
   3383 	}
   3384 
   3385 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3386 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3387 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3388 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3389 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3390 
   3391 out:
   3392 	/* Disable completion timeout resend */
   3393 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3394 
   3395 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3396 }
   3397 
   3398 void
   3399 wm_get_auto_rd_done(struct wm_softc *sc)
   3400 {
   3401 	int i;
   3402 
    3403 	/* Wait for eeprom to reload */
   3404 	switch (sc->sc_type) {
   3405 	case WM_T_82571:
   3406 	case WM_T_82572:
   3407 	case WM_T_82573:
   3408 	case WM_T_82574:
   3409 	case WM_T_82583:
   3410 	case WM_T_82575:
   3411 	case WM_T_82576:
   3412 	case WM_T_82580:
   3413 	case WM_T_I350:
   3414 	case WM_T_I354:
   3415 	case WM_T_I210:
   3416 	case WM_T_I211:
   3417 	case WM_T_80003:
   3418 	case WM_T_ICH8:
   3419 	case WM_T_ICH9:
   3420 		for (i = 0; i < 10; i++) {
   3421 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3422 				break;
   3423 			delay(1000);
   3424 		}
   3425 		if (i == 10) {
   3426 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3427 			    "complete\n", device_xname(sc->sc_dev));
   3428 		}
   3429 		break;
   3430 	default:
   3431 		break;
   3432 	}
   3433 }
   3434 
   3435 void
   3436 wm_lan_init_done(struct wm_softc *sc)
   3437 {
   3438 	uint32_t reg = 0;
   3439 	int i;
   3440 
   3441 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3442 		device_xname(sc->sc_dev), __func__));
   3443 
   3444 	/* Wait for eeprom to reload */
   3445 	switch (sc->sc_type) {
   3446 	case WM_T_ICH10:
   3447 	case WM_T_PCH:
   3448 	case WM_T_PCH2:
   3449 	case WM_T_PCH_LPT:
   3450 	case WM_T_PCH_SPT:
   3451 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3452 			reg = CSR_READ(sc, WMREG_STATUS);
   3453 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3454 				break;
   3455 			delay(100);
   3456 		}
   3457 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3458 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3459 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3460 		}
   3461 		break;
   3462 	default:
   3463 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3464 		    __func__);
   3465 		break;
   3466 	}
   3467 
   3468 	reg &= ~STATUS_LAN_INIT_DONE;
   3469 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3470 }
   3471 
   3472 void
   3473 wm_get_cfg_done(struct wm_softc *sc)
   3474 {
   3475 	int mask;
   3476 	uint32_t reg;
   3477 	int i;
   3478 
   3479 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3480 		device_xname(sc->sc_dev), __func__));
   3481 
   3482 	/* Wait for eeprom to reload */
   3483 	switch (sc->sc_type) {
   3484 	case WM_T_82542_2_0:
   3485 	case WM_T_82542_2_1:
   3486 		/* null */
   3487 		break;
   3488 	case WM_T_82543:
   3489 	case WM_T_82544:
   3490 	case WM_T_82540:
   3491 	case WM_T_82545:
   3492 	case WM_T_82545_3:
   3493 	case WM_T_82546:
   3494 	case WM_T_82546_3:
   3495 	case WM_T_82541:
   3496 	case WM_T_82541_2:
   3497 	case WM_T_82547:
   3498 	case WM_T_82547_2:
   3499 	case WM_T_82573:
   3500 	case WM_T_82574:
   3501 	case WM_T_82583:
   3502 		/* generic */
   3503 		delay(10*1000);
   3504 		break;
   3505 	case WM_T_80003:
   3506 	case WM_T_82571:
   3507 	case WM_T_82572:
   3508 	case WM_T_82575:
   3509 	case WM_T_82576:
   3510 	case WM_T_82580:
   3511 	case WM_T_I350:
   3512 	case WM_T_I354:
   3513 	case WM_T_I210:
   3514 	case WM_T_I211:
   3515 		if (sc->sc_type == WM_T_82571) {
   3516 			/* Only 82571 shares port 0 */
   3517 			mask = EEMNGCTL_CFGDONE_0;
   3518 		} else
   3519 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3520 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3521 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3522 				break;
   3523 			delay(1000);
   3524 		}
   3525 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3526 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3527 				device_xname(sc->sc_dev), __func__));
   3528 		}
   3529 		break;
   3530 	case WM_T_ICH8:
   3531 	case WM_T_ICH9:
   3532 	case WM_T_ICH10:
   3533 	case WM_T_PCH:
   3534 	case WM_T_PCH2:
   3535 	case WM_T_PCH_LPT:
   3536 	case WM_T_PCH_SPT:
   3537 		delay(10*1000);
   3538 		if (sc->sc_type >= WM_T_ICH10)
   3539 			wm_lan_init_done(sc);
   3540 		else
   3541 			wm_get_auto_rd_done(sc);
   3542 
   3543 		reg = CSR_READ(sc, WMREG_STATUS);
   3544 		if ((reg & STATUS_PHYRA) != 0)
   3545 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3546 		break;
   3547 	default:
   3548 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3549 		    __func__);
   3550 		break;
   3551 	}
   3552 }
   3553 
   3554 /* Init hardware bits */
   3555 void
   3556 wm_initialize_hardware_bits(struct wm_softc *sc)
   3557 {
   3558 	uint32_t tarc0, tarc1, reg;
   3559 
   3560 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3561 		device_xname(sc->sc_dev), __func__));
   3562 
   3563 	/* For 82571 variant, 80003 and ICHs */
   3564 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3565 	    || (sc->sc_type >= WM_T_80003)) {
   3566 
   3567 		/* Transmit Descriptor Control 0 */
   3568 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3569 		reg |= TXDCTL_COUNT_DESC;
   3570 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3571 
   3572 		/* Transmit Descriptor Control 1 */
   3573 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3574 		reg |= TXDCTL_COUNT_DESC;
   3575 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3576 
   3577 		/* TARC0 */
   3578 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3579 		switch (sc->sc_type) {
   3580 		case WM_T_82571:
   3581 		case WM_T_82572:
   3582 		case WM_T_82573:
   3583 		case WM_T_82574:
   3584 		case WM_T_82583:
   3585 		case WM_T_80003:
   3586 			/* Clear bits 30..27 */
   3587 			tarc0 &= ~__BITS(30, 27);
   3588 			break;
   3589 		default:
   3590 			break;
   3591 		}
   3592 
   3593 		switch (sc->sc_type) {
   3594 		case WM_T_82571:
   3595 		case WM_T_82572:
   3596 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3597 
   3598 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3599 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3600 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3601 			/* 8257[12] Errata No.7 */
    3602 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3603 
   3604 			/* TARC1 bit 28 */
   3605 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3606 				tarc1 &= ~__BIT(28);
   3607 			else
   3608 				tarc1 |= __BIT(28);
   3609 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3610 
   3611 			/*
   3612 			 * 8257[12] Errata No.13
    3613 			 * Disable Dynamic Clock Gating.
   3614 			 */
   3615 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3616 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3617 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3618 			break;
   3619 		case WM_T_82573:
   3620 		case WM_T_82574:
   3621 		case WM_T_82583:
   3622 			if ((sc->sc_type == WM_T_82574)
   3623 			    || (sc->sc_type == WM_T_82583))
   3624 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3625 
   3626 			/* Extended Device Control */
   3627 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3628 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3629 			reg |= __BIT(22);	/* Set bit 22 */
   3630 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3631 
   3632 			/* Device Control */
   3633 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3634 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3635 
   3636 			/* PCIe Control Register */
   3637 			/*
   3638 			 * 82573 Errata (unknown).
   3639 			 *
   3640 			 * 82574 Errata 25 and 82583 Errata 12
   3641 			 * "Dropped Rx Packets":
    3642 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   3643 			 */
   3644 			reg = CSR_READ(sc, WMREG_GCR);
   3645 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3646 			CSR_WRITE(sc, WMREG_GCR, reg);
   3647 
   3648 			if ((sc->sc_type == WM_T_82574)
   3649 			    || (sc->sc_type == WM_T_82583)) {
   3650 				/*
   3651 				 * Document says this bit must be set for
   3652 				 * proper operation.
   3653 				 */
   3654 				reg = CSR_READ(sc, WMREG_GCR);
   3655 				reg |= __BIT(22);
   3656 				CSR_WRITE(sc, WMREG_GCR, reg);
   3657 
   3658 				/*
    3659 				 * Apply the workaround for the hardware
    3660 				 * erratum where error-prone or unreliable
    3661 				 * PCIe completions can occur, particularly
    3662 				 * with ASPM enabled. Without the fix, the
    3663 				 * issue can cause Tx timeouts.
   3665 				 */
   3666 				reg = CSR_READ(sc, WMREG_GCR2);
   3667 				reg |= __BIT(0);
   3668 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3669 			}
   3670 			break;
   3671 		case WM_T_80003:
   3672 			/* TARC0 */
   3673 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3674 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3675 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3676 
   3677 			/* TARC1 bit 28 */
   3678 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3679 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3680 				tarc1 &= ~__BIT(28);
   3681 			else
   3682 				tarc1 |= __BIT(28);
   3683 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3684 			break;
   3685 		case WM_T_ICH8:
   3686 		case WM_T_ICH9:
   3687 		case WM_T_ICH10:
   3688 		case WM_T_PCH:
   3689 		case WM_T_PCH2:
   3690 		case WM_T_PCH_LPT:
   3691 		case WM_T_PCH_SPT:
   3692 			/* TARC0 */
   3693 			if ((sc->sc_type == WM_T_ICH8)
   3694 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3695 				/* Set TARC0 bits 29 and 28 */
   3696 				tarc0 |= __BITS(29, 28);
   3697 			}
   3698 			/* Set TARC0 bits 23,24,26,27 */
   3699 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3700 
   3701 			/* CTRL_EXT */
   3702 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3703 			reg |= __BIT(22);	/* Set bit 22 */
   3704 			/*
   3705 			 * Enable PHY low-power state when MAC is at D3
   3706 			 * w/o WoL
   3707 			 */
   3708 			if (sc->sc_type >= WM_T_PCH)
   3709 				reg |= CTRL_EXT_PHYPDEN;
   3710 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3711 
   3712 			/* TARC1 */
   3713 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3714 			/* bit 28 */
   3715 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3716 				tarc1 &= ~__BIT(28);
   3717 			else
   3718 				tarc1 |= __BIT(28);
   3719 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3720 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3721 
   3722 			/* Device Status */
   3723 			if (sc->sc_type == WM_T_ICH8) {
   3724 				reg = CSR_READ(sc, WMREG_STATUS);
   3725 				reg &= ~__BIT(31);
   3726 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3727 
   3728 			}
   3729 
   3730 			/* IOSFPC */
   3731 			if (sc->sc_type == WM_T_PCH_SPT) {
   3732 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3733 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   3734 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3735 			}
   3736 			/*
    3737 			 * To work around a descriptor data corruption issue
    3738 			 * seen with NFS v2 UDP traffic, simply disable the
    3739 			 * NFS filtering capability.
   3740 			 */
   3741 			reg = CSR_READ(sc, WMREG_RFCTL);
   3742 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3743 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3744 			break;
   3745 		default:
   3746 			break;
   3747 		}
   3748 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3749 
   3750 		switch (sc->sc_type) {
   3751 		/*
   3752 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3753 		 * Avoid RSS Hash Value bug.
   3754 		 */
   3755 		case WM_T_82571:
   3756 		case WM_T_82572:
   3757 		case WM_T_82573:
   3758 		case WM_T_80003:
   3759 		case WM_T_ICH8:
   3760 			reg = CSR_READ(sc, WMREG_RFCTL);
    3761 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3762 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3763 			break;
   3764 		case WM_T_82574:
    3765 			/* Use extended Rx descriptors. */
   3766 			reg = CSR_READ(sc, WMREG_RFCTL);
   3767 			reg |= WMREG_RFCTL_EXSTEN;
   3768 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3769 			break;
   3770 		default:
   3771 			break;
   3772 		}
   3773 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3774 		/*
   3775 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3776 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3777 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3778 		 * Correctly by the Device"
   3779 		 *
   3780 		 * I354(C2000) Errata AVR53:
   3781 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3782 		 * Hang"
   3783 		 */
   3784 		reg = CSR_READ(sc, WMREG_RFCTL);
   3785 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3786 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3787 	}
   3788 }
   3789 
   3790 static uint32_t
   3791 wm_rxpbs_adjust_82580(uint32_t val)
   3792 {
   3793 	uint32_t rv = 0;
   3794 
   3795 	if (val < __arraycount(wm_82580_rxpbs_table))
   3796 		rv = wm_82580_rxpbs_table[val];
   3797 
   3798 	return rv;
   3799 }
   3800 
   3801 /*
   3802  * wm_reset_phy:
   3803  *
   3804  *	generic PHY reset function.
   3805  *	Same as e1000_phy_hw_reset_generic()
   3806  */
   3807 static void
   3808 wm_reset_phy(struct wm_softc *sc)
   3809 {
   3810 	uint32_t reg;
   3811 
   3812 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3813 		device_xname(sc->sc_dev), __func__));
   3814 	if (wm_phy_resetisblocked(sc))
   3815 		return;
   3816 
   3817 	sc->phy.acquire(sc);
   3818 
   3819 	reg = CSR_READ(sc, WMREG_CTRL);
   3820 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3821 	CSR_WRITE_FLUSH(sc);
   3822 
   3823 	delay(sc->phy.reset_delay_us);
   3824 
   3825 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3826 	CSR_WRITE_FLUSH(sc);
   3827 
   3828 	delay(150);
   3829 
   3830 	sc->phy.release(sc);
   3831 
   3832 	wm_get_cfg_done(sc);
   3833 }
   3834 
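         /*
          * wm_flush_desc_rings:
          *
          *	Flush the TX and RX descriptor rings when the hardware has
          *	set the flush-request bit in the descriptor ring status
          *	config register.  The TX ring is flushed by queueing a
          *	single dummy 512-byte descriptor; the RX ring by briefly
          *	toggling RCTL_EN with adjusted prefetch/host thresholds.
          */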
   3835 static void
   3836 wm_flush_desc_rings(struct wm_softc *sc)
   3837 {
   3838 	pcireg_t preg;
   3839 	uint32_t reg;
   3840 	int nexttx;
   3841 
   3842 	/* First, disable MULR fix in FEXTNVM11 */
   3843 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3844 	reg |= FEXTNVM11_DIS_MULRFIX;
   3845 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3846 
   3847 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3848 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3849 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3850 		struct wm_txqueue *txq;
   3851 		wiseman_txdesc_t *txd;
   3852 
   3853 		/* TX */
   3854 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3855 		    device_xname(sc->sc_dev), preg, reg);
   3856 		reg = CSR_READ(sc, WMREG_TCTL);
   3857 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3858 
   3859 		txq = &sc->sc_queue[0].wmq_txq;
   3860 		nexttx = txq->txq_next;
   3861 		txd = &txq->txq_descs[nexttx];
   3862 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3863 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3864 		txd->wtx_fields.wtxu_status = 0;
   3865 		txd->wtx_fields.wtxu_options = 0;
   3866 		txd->wtx_fields.wtxu_vlan = 0;
   3867 
   3868 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3869 			BUS_SPACE_BARRIER_WRITE);
   3870 
   3871 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3872 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3873 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3874 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3875 		delay(250);
   3876 	}
   3877 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3878 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3879 		uint32_t rctl;
   3880 
   3881 		/* RX */
   3882 		printf("%s: Need RX flush (reg = %08x)\n",
   3883 		    device_xname(sc->sc_dev), preg);
   3884 		rctl = CSR_READ(sc, WMREG_RCTL);
   3885 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3886 		CSR_WRITE_FLUSH(sc);
   3887 		delay(150);
   3888 
   3889 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3890 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3891 		reg &= 0xffffc000;
   3892 		/*
   3893 		 * update thresholds: prefetch threshold to 31, host threshold
   3894 		 * to 1 and make sure the granularity is "descriptors" and not
   3895 		 * "cache lines"
   3896 		 */
   3897 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3898 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3899 
   3900 		/*
   3901 		 * momentarily enable the RX ring for the changes to take
   3902 		 * effect
   3903 		 */
   3904 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3905 		CSR_WRITE_FLUSH(sc);
   3906 		delay(150);
   3907 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3908 	}
   3909 }
   3910 
   3911 /*
   3912  * wm_reset:
   3913  *
   3914  *	Reset the i82542 chip.
   3915  */
   3916 static void
   3917 wm_reset(struct wm_softc *sc)
   3918 {
   3919 	int phy_reset = 0;
   3920 	int i, error = 0;
   3921 	uint32_t reg;
   3922 
   3923 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3924 		device_xname(sc->sc_dev), __func__));
   3925 	KASSERT(sc->sc_type != 0);
   3926 
   3927 	/*
   3928 	 * Allocate on-chip memory according to the MTU size.
   3929 	 * The Packet Buffer Allocation register must be written
   3930 	 * before the chip is reset.
   3931 	 */
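         	/*
         	 * Illustrative example: the 82547 has a 40K packet buffer, so
         	 * with a jumbo MTU it is split 22K RX / 18K TX; the TX FIFO
         	 * bookkeeping below follows from that split (PBA_40K - sc_pba).
         	 */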
   3932 	switch (sc->sc_type) {
   3933 	case WM_T_82547:
   3934 	case WM_T_82547_2:
   3935 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3936 		    PBA_22K : PBA_30K;
   3937 		for (i = 0; i < sc->sc_nqueues; i++) {
   3938 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3939 			txq->txq_fifo_head = 0;
   3940 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3941 			txq->txq_fifo_size =
   3942 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3943 			txq->txq_fifo_stall = 0;
   3944 		}
   3945 		break;
   3946 	case WM_T_82571:
   3947 	case WM_T_82572:
   3948 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   3949 	case WM_T_80003:
   3950 		sc->sc_pba = PBA_32K;
   3951 		break;
   3952 	case WM_T_82573:
   3953 		sc->sc_pba = PBA_12K;
   3954 		break;
   3955 	case WM_T_82574:
   3956 	case WM_T_82583:
   3957 		sc->sc_pba = PBA_20K;
   3958 		break;
   3959 	case WM_T_82576:
   3960 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3961 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3962 		break;
   3963 	case WM_T_82580:
   3964 	case WM_T_I350:
   3965 	case WM_T_I354:
   3966 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3967 		break;
   3968 	case WM_T_I210:
   3969 	case WM_T_I211:
   3970 		sc->sc_pba = PBA_34K;
   3971 		break;
   3972 	case WM_T_ICH8:
   3973 		/* Workaround for a bit corruption issue in FIFO memory */
   3974 		sc->sc_pba = PBA_8K;
   3975 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3976 		break;
   3977 	case WM_T_ICH9:
   3978 	case WM_T_ICH10:
   3979 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3980 		    PBA_14K : PBA_10K;
   3981 		break;
   3982 	case WM_T_PCH:
   3983 	case WM_T_PCH2:
   3984 	case WM_T_PCH_LPT:
   3985 	case WM_T_PCH_SPT:
   3986 		sc->sc_pba = PBA_26K;
   3987 		break;
   3988 	default:
   3989 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3990 		    PBA_40K : PBA_48K;
   3991 		break;
   3992 	}
   3993 	/*
   3994 	 * Only old or non-multiqueue devices have the PBA register
   3995 	 * XXX Need special handling for 82575.
   3996 	 */
   3997 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3998 	    || (sc->sc_type == WM_T_82575))
   3999 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4000 
   4001 	/* Prevent the PCI-E bus from sticking */
   4002 	if (sc->sc_flags & WM_F_PCIE) {
   4003 		int timeout = 800;
   4004 
   4005 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4006 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4007 
   4008 		while (timeout--) {
   4009 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4010 			    == 0)
   4011 				break;
   4012 			delay(100);
   4013 		}
   4014 	}
   4015 
   4016 	/* Set the completion timeout for interface */
   4017 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4018 	    || (sc->sc_type == WM_T_82580)
   4019 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4020 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4021 		wm_set_pcie_completion_timeout(sc);
   4022 
   4023 	/* Clear interrupt */
   4024 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4025 	if (sc->sc_nintrs > 1) {
   4026 		if (sc->sc_type != WM_T_82574) {
   4027 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4028 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4029 		} else {
   4030 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4031 		}
   4032 	}
   4033 
   4034 	/* Stop the transmit and receive processes. */
   4035 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4036 	sc->sc_rctl &= ~RCTL_EN;
   4037 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4038 	CSR_WRITE_FLUSH(sc);
   4039 
   4040 	/* XXX set_tbi_sbp_82543() */
   4041 
   4042 	delay(10*1000);
   4043 
   4044 	/* Must acquire the MDIO ownership before MAC reset */
   4045 	switch (sc->sc_type) {
   4046 	case WM_T_82573:
   4047 	case WM_T_82574:
   4048 	case WM_T_82583:
   4049 		error = wm_get_hw_semaphore_82573(sc);
   4050 		break;
   4051 	default:
   4052 		break;
   4053 	}
   4054 
   4055 	/*
   4056 	 * 82541 Errata 29? & 82547 Errata 28?
   4057 	 * See also the description about PHY_RST bit in CTRL register
   4058 	 * in 8254x_GBe_SDM.pdf.
   4059 	 */
   4060 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4061 		CSR_WRITE(sc, WMREG_CTRL,
   4062 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4063 		CSR_WRITE_FLUSH(sc);
   4064 		delay(5000);
   4065 	}
   4066 
   4067 	switch (sc->sc_type) {
   4068 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4069 	case WM_T_82541:
   4070 	case WM_T_82541_2:
   4071 	case WM_T_82547:
   4072 	case WM_T_82547_2:
   4073 		/*
   4074 		 * On some chipsets, a reset through a memory-mapped write
   4075 		 * cycle can cause the chip to reset before completing the
   4076 		 * write cycle.  This causes major headache that can be
   4077 		 * avoided by issuing the reset via indirect register writes
   4078 		 * through I/O space.
   4079 		 *
   4080 		 * So, if we successfully mapped the I/O BAR at attach time,
   4081 		 * use that.  Otherwise, try our luck with a memory-mapped
   4082 		 * reset.
   4083 		 */
   4084 		if (sc->sc_flags & WM_F_IOH_VALID)
   4085 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4086 		else
   4087 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4088 		break;
   4089 	case WM_T_82545_3:
   4090 	case WM_T_82546_3:
   4091 		/* Use the shadow control register on these chips. */
   4092 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4093 		break;
   4094 	case WM_T_80003:
   4095 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4096 		sc->phy.acquire(sc);
   4097 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4098 		sc->phy.release(sc);
   4099 		break;
   4100 	case WM_T_ICH8:
   4101 	case WM_T_ICH9:
   4102 	case WM_T_ICH10:
   4103 	case WM_T_PCH:
   4104 	case WM_T_PCH2:
   4105 	case WM_T_PCH_LPT:
   4106 	case WM_T_PCH_SPT:
   4107 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4108 		if (wm_phy_resetisblocked(sc) == false) {
   4109 			/*
   4110 			 * Gate automatic PHY configuration by hardware on
   4111 			 * non-managed 82579
   4112 			 */
   4113 			if ((sc->sc_type == WM_T_PCH2)
   4114 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4115 				== 0))
   4116 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4117 
   4118 			reg |= CTRL_PHY_RESET;
   4119 			phy_reset = 1;
   4120 		} else
   4121 			printf("XXX reset is blocked!!!\n");
   4122 		sc->phy.acquire(sc);
   4123 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4124 		/* Don't insert a completion barrier while resetting */
   4125 		delay(20*1000);
   4126 		mutex_exit(sc->sc_ich_phymtx);
   4127 		break;
   4128 	case WM_T_82580:
   4129 	case WM_T_I350:
   4130 	case WM_T_I354:
   4131 	case WM_T_I210:
   4132 	case WM_T_I211:
   4133 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4134 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4135 			CSR_WRITE_FLUSH(sc);
   4136 		delay(5000);
   4137 		break;
   4138 	case WM_T_82542_2_0:
   4139 	case WM_T_82542_2_1:
   4140 	case WM_T_82543:
   4141 	case WM_T_82540:
   4142 	case WM_T_82545:
   4143 	case WM_T_82546:
   4144 	case WM_T_82571:
   4145 	case WM_T_82572:
   4146 	case WM_T_82573:
   4147 	case WM_T_82574:
   4148 	case WM_T_82575:
   4149 	case WM_T_82576:
   4150 	case WM_T_82583:
   4151 	default:
   4152 		/* Everything else can safely use the documented method. */
   4153 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4154 		break;
   4155 	}
   4156 
   4157 	/* Must release the MDIO ownership after MAC reset */
   4158 	switch (sc->sc_type) {
   4159 	case WM_T_82573:
   4160 	case WM_T_82574:
   4161 	case WM_T_82583:
   4162 		if (error == 0)
   4163 			wm_put_hw_semaphore_82573(sc);
   4164 		break;
   4165 	default:
   4166 		break;
   4167 	}
   4168 
   4169 	if (phy_reset != 0)
   4170 		wm_get_cfg_done(sc);
   4171 
   4172 	/* reload EEPROM */
   4173 	switch (sc->sc_type) {
   4174 	case WM_T_82542_2_0:
   4175 	case WM_T_82542_2_1:
   4176 	case WM_T_82543:
   4177 	case WM_T_82544:
   4178 		delay(10);
   4179 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4180 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4181 		CSR_WRITE_FLUSH(sc);
   4182 		delay(2000);
   4183 		break;
   4184 	case WM_T_82540:
   4185 	case WM_T_82545:
   4186 	case WM_T_82545_3:
   4187 	case WM_T_82546:
   4188 	case WM_T_82546_3:
   4189 		delay(5*1000);
   4190 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4191 		break;
   4192 	case WM_T_82541:
   4193 	case WM_T_82541_2:
   4194 	case WM_T_82547:
   4195 	case WM_T_82547_2:
   4196 		delay(20000);
   4197 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4198 		break;
   4199 	case WM_T_82571:
   4200 	case WM_T_82572:
   4201 	case WM_T_82573:
   4202 	case WM_T_82574:
   4203 	case WM_T_82583:
   4204 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4205 			delay(10);
   4206 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4207 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4208 			CSR_WRITE_FLUSH(sc);
   4209 		}
   4210 		/* check EECD_EE_AUTORD */
   4211 		wm_get_auto_rd_done(sc);
   4212 		/*
    4213 		 * PHY configuration from NVM starts only after EECD_EE_AUTORD
   4214 		 * is set.
   4215 		 */
   4216 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4217 		    || (sc->sc_type == WM_T_82583))
   4218 			delay(25*1000);
   4219 		break;
   4220 	case WM_T_82575:
   4221 	case WM_T_82576:
   4222 	case WM_T_82580:
   4223 	case WM_T_I350:
   4224 	case WM_T_I354:
   4225 	case WM_T_I210:
   4226 	case WM_T_I211:
   4227 	case WM_T_80003:
   4228 		/* check EECD_EE_AUTORD */
   4229 		wm_get_auto_rd_done(sc);
   4230 		break;
   4231 	case WM_T_ICH8:
   4232 	case WM_T_ICH9:
   4233 	case WM_T_ICH10:
   4234 	case WM_T_PCH:
   4235 	case WM_T_PCH2:
   4236 	case WM_T_PCH_LPT:
   4237 	case WM_T_PCH_SPT:
   4238 		break;
   4239 	default:
   4240 		panic("%s: unknown type\n", __func__);
   4241 	}
   4242 
   4243 	/* Check whether EEPROM is present or not */
   4244 	switch (sc->sc_type) {
   4245 	case WM_T_82575:
   4246 	case WM_T_82576:
   4247 	case WM_T_82580:
   4248 	case WM_T_I350:
   4249 	case WM_T_I354:
   4250 	case WM_T_ICH8:
   4251 	case WM_T_ICH9:
   4252 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4253 			/* Not found */
   4254 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4255 			if (sc->sc_type == WM_T_82575)
   4256 				wm_reset_init_script_82575(sc);
   4257 		}
   4258 		break;
   4259 	default:
   4260 		break;
   4261 	}
   4262 
   4263 	if ((sc->sc_type == WM_T_82580)
   4264 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4265 		/* clear global device reset status bit */
   4266 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4267 	}
   4268 
   4269 	/* Clear any pending interrupt events. */
   4270 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4271 	reg = CSR_READ(sc, WMREG_ICR);
   4272 	if (sc->sc_nintrs > 1) {
   4273 		if (sc->sc_type != WM_T_82574) {
   4274 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4275 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4276 		} else
   4277 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4278 	}
   4279 
   4280 	/* reload sc_ctrl */
   4281 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4282 
   4283 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4284 		wm_set_eee_i350(sc);
   4285 
    4286 	/* Clear the host wakeup bit after LCD (LAN connected device) reset */
   4287 	if (sc->sc_type >= WM_T_PCH) {
   4288 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4289 		    BM_PORT_GEN_CFG);
   4290 		reg &= ~BM_WUC_HOST_WU_BIT;
   4291 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4292 		    BM_PORT_GEN_CFG, reg);
   4293 	}
   4294 
   4295 	/*
   4296 	 * For PCH, this write will make sure that any noise will be detected
   4297 	 * as a CRC error and be dropped rather than show up as a bad packet
   4298 	 * to the DMA engine
   4299 	 */
   4300 	if (sc->sc_type == WM_T_PCH)
   4301 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4302 
   4303 	if (sc->sc_type >= WM_T_82544)
   4304 		CSR_WRITE(sc, WMREG_WUC, 0);
   4305 
   4306 	wm_reset_mdicnfg_82580(sc);
   4307 
   4308 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4309 		wm_pll_workaround_i210(sc);
   4310 }
   4311 
   4312 /*
   4313  * wm_add_rxbuf:
   4314  *
    4315  *	Add a receive buffer to the indicated descriptor.
   4316  */
   4317 static int
   4318 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4319 {
   4320 	struct wm_softc *sc = rxq->rxq_sc;
   4321 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4322 	struct mbuf *m;
   4323 	int error;
   4324 
   4325 	KASSERT(mutex_owned(rxq->rxq_lock));
   4326 
   4327 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4328 	if (m == NULL)
   4329 		return ENOBUFS;
   4330 
   4331 	MCLGET(m, M_DONTWAIT);
   4332 	if ((m->m_flags & M_EXT) == 0) {
   4333 		m_freem(m);
   4334 		return ENOBUFS;
   4335 	}
   4336 
   4337 	if (rxs->rxs_mbuf != NULL)
   4338 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4339 
   4340 	rxs->rxs_mbuf = m;
   4341 
   4342 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4343 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4344 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4345 	if (error) {
   4346 		/* XXX XXX XXX */
   4347 		aprint_error_dev(sc->sc_dev,
   4348 		    "unable to load rx DMA map %d, error = %d\n",
   4349 		    idx, error);
   4350 		panic("wm_add_rxbuf");
   4351 	}
   4352 
   4353 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4354 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4355 
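         	/*
         	 * On NEWQUEUE (multiqueue-capable) devices the descriptor is
         	 * only (re)initialized while the receiver is enabled; older
         	 * devices initialize it unconditionally.
         	 */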
   4356 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4357 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4358 			wm_init_rxdesc(rxq, idx);
   4359 	} else
   4360 		wm_init_rxdesc(rxq, idx);
   4361 
   4362 	return 0;
   4363 }
   4364 
   4365 /*
   4366  * wm_rxdrain:
   4367  *
   4368  *	Drain the receive queue.
   4369  */
   4370 static void
   4371 wm_rxdrain(struct wm_rxqueue *rxq)
   4372 {
   4373 	struct wm_softc *sc = rxq->rxq_sc;
   4374 	struct wm_rxsoft *rxs;
   4375 	int i;
   4376 
   4377 	KASSERT(mutex_owned(rxq->rxq_lock));
   4378 
   4379 	for (i = 0; i < WM_NRXDESC; i++) {
   4380 		rxs = &rxq->rxq_soft[i];
   4381 		if (rxs->rxs_mbuf != NULL) {
   4382 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4383 			m_freem(rxs->rxs_mbuf);
   4384 			rxs->rxs_mbuf = NULL;
   4385 		}
   4386 	}
   4387 }
   4388 
   4389 
   4390 /*
   4391  * XXX copy from FreeBSD's sys/net/rss_config.c
   4392  */
   4393 /*
   4394  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4395  * effectiveness may be limited by algorithm choice and available entropy
   4396  * during the boot.
   4397  *
   4398  * XXXRW: And that we don't randomize it yet!
   4399  *
   4400  * This is the default Microsoft RSS specification key which is also
   4401  * the Chelsio T5 firmware default key.
   4402  */
   4403 #define RSS_KEYSIZE 40
   4404 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4405 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4406 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4407 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4408 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4409 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4410 };
   4411 
   4412 /*
    4413  * Caller must pass an array of size sizeof(wm_rss_key).
    4414  *
    4415  * XXX
    4416  * As if_ixgbe may use this function, it should not be an
    4417  * if_wm specific function.
   4418  */
   4419 static void
   4420 wm_rss_getkey(uint8_t *key)
   4421 {
   4422 
   4423 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4424 }
   4425 
   4426 /*
    4427  * Set up registers for RSS.
    4428  *
    4429  * XXX VMDq is not yet supported.
   4430  */
   4431 static void
   4432 wm_init_rss(struct wm_softc *sc)
   4433 {
   4434 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4435 	int i;
   4436 
   4437 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4438 
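         	/*
         	 * Stripe the redirection table round-robin across the active
         	 * queues: entry i points at queue i % sc_nqueues, so with
         	 * four queues the entries map to queues 0, 1, 2, 3, 0, 1, ...
         	 */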
   4439 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4440 		int qid, reta_ent;
   4441 
   4442 		qid  = i % sc->sc_nqueues;
    4443 		switch (sc->sc_type) {
   4444 		case WM_T_82574:
   4445 			reta_ent = __SHIFTIN(qid,
   4446 			    RETA_ENT_QINDEX_MASK_82574);
   4447 			break;
   4448 		case WM_T_82575:
   4449 			reta_ent = __SHIFTIN(qid,
   4450 			    RETA_ENT_QINDEX1_MASK_82575);
   4451 			break;
   4452 		default:
   4453 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4454 			break;
   4455 		}
   4456 
   4457 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4458 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4459 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4460 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4461 	}
   4462 
   4463 	wm_rss_getkey((uint8_t *)rss_key);
   4464 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4465 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4466 
   4467 	if (sc->sc_type == WM_T_82574)
   4468 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4469 	else
   4470 		mrqc = MRQC_ENABLE_RSS_MQ;
   4471 
   4472 	/*
   4473 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4474 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4475 	 */
   4476 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4477 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4478 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4479 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4480 
   4481 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4482 }
   4483 
   4484 /*
    4485  * Adjust the TX and RX queue numbers which the system actually uses.
    4486  *
    4487  * The numbers are affected by the following parameters:
    4488  *     - The number of hardware queues
   4489  *     - The number of MSI-X vectors (= "nvectors" argument)
   4490  *     - ncpu
   4491  */
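         /*
          * Illustrative example: an 82576 exposes 16 TX and 16 RX hardware
          * queues; probed with nvectors == 5 on an 8-CPU machine, the limit
          * is nvectors - 1 == 4, which is also below ncpu, so sc_nqueues
          * becomes 4.
          */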
   4492 static void
   4493 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4494 {
   4495 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4496 
   4497 	if (nvectors < 2) {
   4498 		sc->sc_nqueues = 1;
   4499 		return;
   4500 	}
   4501 
    4502 	switch (sc->sc_type) {
   4503 	case WM_T_82572:
   4504 		hw_ntxqueues = 2;
   4505 		hw_nrxqueues = 2;
   4506 		break;
   4507 	case WM_T_82574:
   4508 		hw_ntxqueues = 2;
   4509 		hw_nrxqueues = 2;
   4510 		break;
   4511 	case WM_T_82575:
   4512 		hw_ntxqueues = 4;
   4513 		hw_nrxqueues = 4;
   4514 		break;
   4515 	case WM_T_82576:
   4516 		hw_ntxqueues = 16;
   4517 		hw_nrxqueues = 16;
   4518 		break;
   4519 	case WM_T_82580:
   4520 	case WM_T_I350:
   4521 	case WM_T_I354:
   4522 		hw_ntxqueues = 8;
   4523 		hw_nrxqueues = 8;
   4524 		break;
   4525 	case WM_T_I210:
   4526 		hw_ntxqueues = 4;
   4527 		hw_nrxqueues = 4;
   4528 		break;
   4529 	case WM_T_I211:
   4530 		hw_ntxqueues = 2;
   4531 		hw_nrxqueues = 2;
   4532 		break;
   4533 		/*
    4534 		 * As the following ethernet controllers do not support
    4535 		 * MSI-X, this driver does not use multiqueue on them:
   4536 		 *     - WM_T_80003
   4537 		 *     - WM_T_ICH8
   4538 		 *     - WM_T_ICH9
   4539 		 *     - WM_T_ICH10
   4540 		 *     - WM_T_PCH
   4541 		 *     - WM_T_PCH2
   4542 		 *     - WM_T_PCH_LPT
   4543 		 */
   4544 	default:
   4545 		hw_ntxqueues = 1;
   4546 		hw_nrxqueues = 1;
   4547 		break;
   4548 	}
   4549 
   4550 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4551 
   4552 	/*
    4553 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4554 	 * limit the number of queues actually used.
   4555 	 */
   4556 	if (nvectors < hw_nqueues + 1) {
   4557 		sc->sc_nqueues = nvectors - 1;
   4558 	} else {
   4559 		sc->sc_nqueues = hw_nqueues;
   4560 	}
   4561 
   4562 	/*
    4563 	 * As more queues than CPUs cannot improve scaling, we limit
    4564 	 * the number of queues actually used.
   4565 	 */
   4566 	if (ncpu < sc->sc_nqueues)
   4567 		sc->sc_nqueues = ncpu;
   4568 }
   4569 
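         /*
          * wm_softint_establish:
          *
          *	Establish the per-queue software interrupt which defers
          *	TX/RX processing (wm_handle_queue) from the hardware
          *	interrupt handler.
          */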
   4570 static int
   4571 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4572 {
   4573 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4574 	wmq->wmq_id = qidx;
   4575 	wmq->wmq_intr_idx = intr_idx;
   4576 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4577 #ifdef WM_MPSAFE
   4578 	    | SOFTINT_MPSAFE
   4579 #endif
   4580 	    , wm_handle_queue, wmq);
   4581 	if (wmq->wmq_si != NULL)
   4582 		return 0;
   4583 
   4584 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4585 	    wmq->wmq_id);
   4586 
   4587 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4588 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4589 	return ENOMEM;
   4590 }
   4591 
   4592 /*
    4593  * Both single-interrupt MSI and INTx can use this function.
   4594  */
   4595 static int
   4596 wm_setup_legacy(struct wm_softc *sc)
   4597 {
   4598 	pci_chipset_tag_t pc = sc->sc_pc;
   4599 	const char *intrstr = NULL;
   4600 	char intrbuf[PCI_INTRSTR_LEN];
   4601 	int error;
   4602 
   4603 	error = wm_alloc_txrx_queues(sc);
   4604 	if (error) {
   4605 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4606 		    error);
   4607 		return ENOMEM;
   4608 	}
   4609 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4610 	    sizeof(intrbuf));
   4611 #ifdef WM_MPSAFE
   4612 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4613 #endif
   4614 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4615 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4616 	if (sc->sc_ihs[0] == NULL) {
    4617 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4618 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4619 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4620 		return ENOMEM;
   4621 	}
   4622 
   4623 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4624 	sc->sc_nintrs = 1;
   4625 
   4626 	return wm_softint_establish(sc, 0, 0);
   4627 }
   4628 
   4629 static int
   4630 wm_setup_msix(struct wm_softc *sc)
   4631 {
   4632 	void *vih;
   4633 	kcpuset_t *affinity;
   4634 	int qidx, error, intr_idx, txrx_established;
   4635 	pci_chipset_tag_t pc = sc->sc_pc;
   4636 	const char *intrstr = NULL;
   4637 	char intrbuf[PCI_INTRSTR_LEN];
   4638 	char intr_xname[INTRDEVNAMEBUF];
   4639 
   4640 	if (sc->sc_nqueues < ncpu) {
   4641 		/*
   4642 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4643 		 * interrupts starts at CPU#1.
   4644 		 */
   4645 		sc->sc_affinity_offset = 1;
   4646 	} else {
   4647 		/*
    4648 		 * In this case, this device uses all CPUs, so for readability
    4649 		 * we unify the affinity cpu_index with the MSI-X vector number.
   4650 		 */
   4651 		sc->sc_affinity_offset = 0;
   4652 	}
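
         	/*
         	 * Example of the resulting round-robin mapping: with
         	 * sc_nqueues = 4 and ncpu = 8, sc_affinity_offset = 1, so the
         	 * TXRX interrupt for queue qidx is bound to cpu index
         	 * (1 + qidx) % ncpu, i.e. TXRX0..TXRX3 land on CPU1..CPU4
         	 * while CPU0 keeps servicing other devices' interrupts.
         	 */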
   4653 
   4654 	error = wm_alloc_txrx_queues(sc);
   4655 	if (error) {
   4656 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4657 		    error);
   4658 		return ENOMEM;
   4659 	}
   4660 
   4661 	kcpuset_create(&affinity, false);
   4662 	intr_idx = 0;
   4663 
   4664 	/*
   4665 	 * TX and RX
   4666 	 */
   4667 	txrx_established = 0;
   4668 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4669 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4670 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4671 
   4672 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4673 		    sizeof(intrbuf));
   4674 #ifdef WM_MPSAFE
   4675 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4676 		    PCI_INTR_MPSAFE, true);
   4677 #endif
   4678 		memset(intr_xname, 0, sizeof(intr_xname));
   4679 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4680 		    device_xname(sc->sc_dev), qidx);
   4681 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4682 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4683 		if (vih == NULL) {
   4684 			aprint_error_dev(sc->sc_dev,
   4685 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4686 			    intrstr ? " at " : "",
   4687 			    intrstr ? intrstr : "");
   4688 
   4689 			goto fail;
   4690 		}
   4691 		kcpuset_zero(affinity);
   4692 		/* Round-robin affinity */
   4693 		kcpuset_set(affinity, affinity_to);
   4694 		error = interrupt_distribute(vih, affinity, NULL);
   4695 		if (error == 0) {
   4696 			aprint_normal_dev(sc->sc_dev,
   4697 			    "for TX and RX interrupting at %s affinity to %u\n",
   4698 			    intrstr, affinity_to);
   4699 		} else {
   4700 			aprint_normal_dev(sc->sc_dev,
   4701 			    "for TX and RX interrupting at %s\n", intrstr);
   4702 		}
   4703 		sc->sc_ihs[intr_idx] = vih;
   4704 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4705 			goto fail;
   4706 		txrx_established++;
   4707 		intr_idx++;
   4708 	}
   4709 
   4710 	/*
   4711 	 * LINK
   4712 	 */
   4713 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4714 	    sizeof(intrbuf));
   4715 #ifdef WM_MPSAFE
   4716 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4717 #endif
   4718 	memset(intr_xname, 0, sizeof(intr_xname));
   4719 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4720 	    device_xname(sc->sc_dev));
   4721 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4722 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4723 	if (vih == NULL) {
   4724 		aprint_error_dev(sc->sc_dev,
   4725 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4726 		    intrstr ? " at " : "",
   4727 		    intrstr ? intrstr : "");
   4728 
   4729 		goto fail;
   4730 	}
    4731 	/* Keep the default affinity for the LINK interrupt */
   4732 	aprint_normal_dev(sc->sc_dev,
   4733 	    "for LINK interrupting at %s\n", intrstr);
   4734 	sc->sc_ihs[intr_idx] = vih;
   4735 	sc->sc_link_intr_idx = intr_idx;
   4736 
   4737 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4738 	kcpuset_destroy(affinity);
   4739 	return 0;
   4740 
   4741  fail:
   4742 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4743 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4744 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4745 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4746 	}
   4747 
   4748 	kcpuset_destroy(affinity);
   4749 	return ENOMEM;
   4750 }
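
         /*
          * On success, the MSI-X vector layout established above is:
          * vectors 0 .. sc_nqueues - 1 handle TX/RX for the corresponding
          * queue and vector sc_nqueues handles LINK, hence
          * sc_nintrs = sc_nqueues + 1.
          */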
   4751 
   4752 static void
   4753 wm_turnon(struct wm_softc *sc)
   4754 {
   4755 	int i;
   4756 
   4757 	KASSERT(WM_CORE_LOCKED(sc));
   4758 
   4759 	/*
    4760 	 * Must unset the stopping flags in ascending order.
    4761 	 */
    4762 	for (i = 0; i < sc->sc_nqueues; i++) {
   4763 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4764 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4765 
   4766 		mutex_enter(txq->txq_lock);
   4767 		txq->txq_stopping = false;
   4768 		mutex_exit(txq->txq_lock);
   4769 
   4770 		mutex_enter(rxq->rxq_lock);
   4771 		rxq->rxq_stopping = false;
   4772 		mutex_exit(rxq->rxq_lock);
   4773 	}
   4774 
   4775 	sc->sc_core_stopping = false;
   4776 }
   4777 
   4778 static void
   4779 wm_turnoff(struct wm_softc *sc)
   4780 {
   4781 	int i;
   4782 
   4783 	KASSERT(WM_CORE_LOCKED(sc));
   4784 
   4785 	sc->sc_core_stopping = true;
   4786 
   4787 	/*
    4788 	 * Must set the stopping flags in ascending order.
    4789 	 */
    4790 	for (i = 0; i < sc->sc_nqueues; i++) {
   4791 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4792 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4793 
   4794 		mutex_enter(rxq->rxq_lock);
   4795 		rxq->rxq_stopping = true;
   4796 		mutex_exit(rxq->rxq_lock);
   4797 
   4798 		mutex_enter(txq->txq_lock);
   4799 		txq->txq_stopping = true;
   4800 		mutex_exit(txq->txq_lock);
   4801 	}
   4802 }
   4803 
   4804 /*
   4805  * wm_init:		[ifnet interface function]
   4806  *
   4807  *	Initialize the interface.
   4808  */
   4809 static int
   4810 wm_init(struct ifnet *ifp)
   4811 {
   4812 	struct wm_softc *sc = ifp->if_softc;
   4813 	int ret;
   4814 
   4815 	WM_CORE_LOCK(sc);
   4816 	ret = wm_init_locked(ifp);
   4817 	WM_CORE_UNLOCK(sc);
   4818 
   4819 	return ret;
   4820 }
   4821 
   4822 static int
   4823 wm_init_locked(struct ifnet *ifp)
   4824 {
   4825 	struct wm_softc *sc = ifp->if_softc;
   4826 	int i, j, trynum, error = 0;
   4827 	uint32_t reg;
   4828 
   4829 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4830 		device_xname(sc->sc_dev), __func__));
   4831 	KASSERT(WM_CORE_LOCKED(sc));
   4832 
   4833 	/*
    4834 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4835 	 * There is a small but measurable benefit to avoiding the adjustment
   4836 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4837 	 * on such platforms.  One possibility is that the DMA itself is
   4838 	 * slightly more efficient if the front of the entire packet (instead
   4839 	 * of the front of the headers) is aligned.
   4840 	 *
   4841 	 * Note we must always set align_tweak to 0 if we are using
   4842 	 * jumbo frames.
   4843 	 */
   4844 #ifdef __NO_STRICT_ALIGNMENT
   4845 	sc->sc_align_tweak = 0;
   4846 #else
   4847 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4848 		sc->sc_align_tweak = 0;
   4849 	else
   4850 		sc->sc_align_tweak = 2;
   4851 #endif /* __NO_STRICT_ALIGNMENT */
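
         	/*
         	 * Example: the Ethernet header is 14 bytes, so the 2-byte tweak
         	 * at the front of the receive buffer makes the following IP
         	 * header start on a 4-byte boundary, which strict-alignment
         	 * platforms require for the stack's word-sized accesses.
         	 */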
   4852 
   4853 	/* Cancel any pending I/O. */
   4854 	wm_stop_locked(ifp, 0);
   4855 
   4856 	/* update statistics before reset */
   4857 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4858 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4859 
   4860 	/* PCH_SPT hardware workaround */
   4861 	if (sc->sc_type == WM_T_PCH_SPT)
   4862 		wm_flush_desc_rings(sc);
   4863 
   4864 	/* Reset the chip to a known state. */
   4865 	wm_reset(sc);
   4866 
   4867 	/* AMT based hardware can now take control from firmware */
   4868 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4869 		wm_get_hw_control(sc);
   4870 
   4871 	/* Init hardware bits */
   4872 	wm_initialize_hardware_bits(sc);
   4873 
   4874 	/* Reset the PHY. */
   4875 	if (sc->sc_flags & WM_F_HAS_MII)
   4876 		wm_gmii_reset(sc);
   4877 
   4878 	/* Calculate (E)ITR value */
   4879 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   4880 		/*
    4881 		 * For NEWQUEUE's EITR (except for the 82575).
    4882 		 * The 82575's EITR should be set to the same throttling value
    4883 		 * as the other old controllers' ITR because the interrupts/sec
    4884 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    4885 		 *
    4886 		 * The 82574's EITR should be set to the same value as its ITR.
    4887 		 *
    4888 		 * For N interrupts/sec, set this value to 1,000,000 / N, in
    4889 		 * contrast to the ITR throttling value.
   4890 		 */
   4891 		sc->sc_itr_init = 450;
   4892 	} else if (sc->sc_type >= WM_T_82543) {
   4893 		/*
   4894 		 * Set up the interrupt throttling register (units of 256ns)
   4895 		 * Note that a footnote in Intel's documentation says this
   4896 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4897 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4898 		 * that that is also true for the 1024ns units of the other
   4899 		 * interrupt-related timer registers -- so, really, we ought
   4900 		 * to divide this value by 4 when the link speed is low.
   4901 		 *
   4902 		 * XXX implement this division at link speed change!
   4903 		 */
   4904 
   4905 		/*
   4906 		 * For N interrupts/sec, set this value to:
   4907 		 * 1,000,000,000 / (N * 256).  Note that we set the
   4908 		 * absolute and packet timer values to this value
   4909 		 * divided by 4 to get "simple timer" behavior.
   4910 		 */
   4911 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   4912 	}
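
         	/*
         	 * Illustrative arithmetic for the two scales: the EITR default
         	 * of 450 gives roughly 1,000,000 / 450 = ~2222 interrupts/sec,
         	 * while the legacy ITR default of 1500 gives
         	 * 1,000,000,000 / (1500 * 256) = ~2604 interrupts/sec, so both
         	 * defaults throttle to the same order of magnitude.
         	 */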
   4913 
   4914 	error = wm_init_txrx_queues(sc);
   4915 	if (error)
   4916 		goto out;
   4917 
   4918 	/*
   4919 	 * Clear out the VLAN table -- we don't use it (yet).
   4920 	 */
   4921 	CSR_WRITE(sc, WMREG_VET, 0);
   4922 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4923 		trynum = 10; /* Due to hw errata */
   4924 	else
   4925 		trynum = 1;
   4926 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4927 		for (j = 0; j < trynum; j++)
   4928 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4929 
   4930 	/*
   4931 	 * Set up flow-control parameters.
   4932 	 *
   4933 	 * XXX Values could probably stand some tuning.
   4934 	 */
   4935 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4936 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4937 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4938 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4939 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4940 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4941 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4942 	}
   4943 
   4944 	sc->sc_fcrtl = FCRTL_DFLT;
   4945 	if (sc->sc_type < WM_T_82543) {
   4946 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4947 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4948 	} else {
   4949 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4950 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4951 	}
   4952 
   4953 	if (sc->sc_type == WM_T_80003)
   4954 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4955 	else
   4956 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4957 
   4958 	/* Writes the control register. */
   4959 	wm_set_vlan(sc);
   4960 
   4961 	if (sc->sc_flags & WM_F_HAS_MII) {
   4962 		int val;
   4963 
   4964 		switch (sc->sc_type) {
   4965 		case WM_T_80003:
   4966 		case WM_T_ICH8:
   4967 		case WM_T_ICH9:
   4968 		case WM_T_ICH10:
   4969 		case WM_T_PCH:
   4970 		case WM_T_PCH2:
   4971 		case WM_T_PCH_LPT:
   4972 		case WM_T_PCH_SPT:
   4973 			/*
   4974 			 * Set the mac to wait the maximum time between each
   4975 			 * iteration and increase the max iterations when
   4976 			 * polling the phy; this fixes erroneous timeouts at
   4977 			 * 10Mbps.
   4978 			 */
   4979 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4980 			    0xFFFF);
   4981 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4982 			val |= 0x3F;
   4983 			wm_kmrn_writereg(sc,
   4984 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4985 			break;
   4986 		default:
   4987 			break;
   4988 		}
   4989 
   4990 		if (sc->sc_type == WM_T_80003) {
   4991 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4992 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4993 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4994 
   4995 			/* Bypass RX and TX FIFO's */
   4996 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4997 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4998 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4999 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5000 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5001 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5002 		}
   5003 	}
   5004 #if 0
   5005 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5006 #endif
   5007 
   5008 	/* Set up checksum offload parameters. */
   5009 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5010 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5011 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5012 		reg |= RXCSUM_IPOFL;
   5013 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5014 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5015 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5016 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5017 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5018 
   5019 	/* Set up MSI-X */
   5020 	if (sc->sc_nintrs > 1) {
   5021 		uint32_t ivar;
   5022 		struct wm_queue *wmq;
   5023 		int qid, qintr_idx;
   5024 
   5025 		if (sc->sc_type == WM_T_82575) {
   5026 			/* Interrupt control */
   5027 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5028 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5029 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5030 
   5031 			/* TX and RX */
   5032 			for (i = 0; i < sc->sc_nqueues; i++) {
   5033 				wmq = &sc->sc_queue[i];
   5034 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5035 				    EITR_TX_QUEUE(wmq->wmq_id)
   5036 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5037 			}
   5038 			/* Link status */
   5039 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5040 			    EITR_OTHER);
   5041 		} else if (sc->sc_type == WM_T_82574) {
   5042 			/* Interrupt control */
   5043 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5044 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5045 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5046 
   5047 			/*
    5048 			 * Work around an issue with spurious interrupts in
    5049 			 * MSI-X mode.
    5050 			 * At wm_initialize_hardware_bits(), sc_nintrs has not been
    5051 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   5052 			 */
   5053 			reg = CSR_READ(sc, WMREG_RFCTL);
   5054 			reg |= WMREG_RFCTL_ACKDIS;
   5055 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5056 
   5057 			ivar = 0;
   5058 			/* TX and RX */
   5059 			for (i = 0; i < sc->sc_nqueues; i++) {
   5060 				wmq = &sc->sc_queue[i];
   5061 				qid = wmq->wmq_id;
   5062 				qintr_idx = wmq->wmq_intr_idx;
   5063 
   5064 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5065 				    IVAR_TX_MASK_Q_82574(qid));
   5066 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5067 				    IVAR_RX_MASK_Q_82574(qid));
   5068 			}
   5069 			/* Link status */
   5070 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5071 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5072 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5073 		} else {
   5074 			/* Interrupt control */
   5075 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5076 			    | GPIE_EIAME | GPIE_PBA);
   5077 
   5078 			switch (sc->sc_type) {
   5079 			case WM_T_82580:
   5080 			case WM_T_I350:
   5081 			case WM_T_I354:
   5082 			case WM_T_I210:
   5083 			case WM_T_I211:
   5084 				/* TX and RX */
   5085 				for (i = 0; i < sc->sc_nqueues; i++) {
   5086 					wmq = &sc->sc_queue[i];
   5087 					qid = wmq->wmq_id;
   5088 					qintr_idx = wmq->wmq_intr_idx;
   5089 
   5090 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5091 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5092 					ivar |= __SHIFTIN((qintr_idx
   5093 						| IVAR_VALID),
   5094 					    IVAR_TX_MASK_Q(qid));
   5095 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5096 					ivar |= __SHIFTIN((qintr_idx
   5097 						| IVAR_VALID),
   5098 					    IVAR_RX_MASK_Q(qid));
   5099 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5100 				}
   5101 				break;
   5102 			case WM_T_82576:
   5103 				/* TX and RX */
   5104 				for (i = 0; i < sc->sc_nqueues; i++) {
   5105 					wmq = &sc->sc_queue[i];
   5106 					qid = wmq->wmq_id;
   5107 					qintr_idx = wmq->wmq_intr_idx;
   5108 
   5109 					ivar = CSR_READ(sc,
   5110 					    WMREG_IVAR_Q_82576(qid));
   5111 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5112 					ivar |= __SHIFTIN((qintr_idx
   5113 						| IVAR_VALID),
   5114 					    IVAR_TX_MASK_Q_82576(qid));
   5115 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5116 					ivar |= __SHIFTIN((qintr_idx
   5117 						| IVAR_VALID),
   5118 					    IVAR_RX_MASK_Q_82576(qid));
   5119 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5120 					    ivar);
   5121 				}
   5122 				break;
   5123 			default:
   5124 				break;
   5125 			}
   5126 
   5127 			/* Link status */
   5128 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5129 			    IVAR_MISC_OTHER);
   5130 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5131 		}
   5132 
   5133 		if (sc->sc_nqueues > 1) {
   5134 			wm_init_rss(sc);
   5135 
    5136 			/*
    5137 			 * NOTE: Receive Full-Packet Checksum Offload is
    5138 			 * mutually exclusive with Multiqueue.  However, this
    5139 			 * is not the same as the TCP/IP checksum offloads,
    5140 			 * which still work.
    5141 			 */
   5142 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5143 			reg |= RXCSUM_PCSD;
   5144 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5145 		}
   5146 	}
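
         	/*
         	 * Example of the routing set up above: on an 82580 with four
         	 * queues, TX and RX events for queue qid are steered to MSI-X
         	 * vector wmq_intr_idx through WMREG_IVAR_Q(qid), and link
         	 * events to vector sc_link_intr_idx through WMREG_IVAR_MISC.
         	 */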
   5147 
   5148 	/* Set up the interrupt registers. */
   5149 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5150 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5151 	    ICR_RXO | ICR_RXT0;
   5152 	if (sc->sc_nintrs > 1) {
   5153 		uint32_t mask;
   5154 		struct wm_queue *wmq;
   5155 
   5156 		switch (sc->sc_type) {
   5157 		case WM_T_82574:
   5158 			mask = 0;
   5159 			for (i = 0; i < sc->sc_nqueues; i++) {
   5160 				wmq = &sc->sc_queue[i];
   5161 				mask |= ICR_TXQ(wmq->wmq_id);
   5162 				mask |= ICR_RXQ(wmq->wmq_id);
   5163 			}
   5164 			mask |= ICR_OTHER;
   5165 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5166 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5167 			break;
   5168 		default:
   5169 			if (sc->sc_type == WM_T_82575) {
   5170 				mask = 0;
   5171 				for (i = 0; i < sc->sc_nqueues; i++) {
   5172 					wmq = &sc->sc_queue[i];
   5173 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5174 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5175 				}
   5176 				mask |= EITR_OTHER;
   5177 			} else {
   5178 				mask = 0;
   5179 				for (i = 0; i < sc->sc_nqueues; i++) {
   5180 					wmq = &sc->sc_queue[i];
   5181 					mask |= 1 << wmq->wmq_intr_idx;
   5182 				}
   5183 				mask |= 1 << sc->sc_link_intr_idx;
   5184 			}
   5185 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5186 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5187 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5188 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5189 			break;
   5190 		}
   5191 	} else
   5192 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5193 
   5194 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5195 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5196 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5197 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5198 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5199 		reg |= KABGTXD_BGSQLBIAS;
   5200 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5201 	}
   5202 
   5203 	/* Set up the inter-packet gap. */
   5204 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5205 
   5206 	if (sc->sc_type >= WM_T_82543) {
   5207 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5208 			int qidx;
   5209 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5210 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5211 				uint32_t eitr = __SHIFTIN(wmq->wmq_itr,
   5212 				    EITR_ITR_INT_MASK);
   5213 
   5214 				/*
    5215 				 * The 82575 doesn't have the CNT_INGR field,
    5216 				 * so overwrite the counter field in software.
   5217 				 */
   5218 				if (sc->sc_type == WM_T_82575) {
   5219 					eitr |= __SHIFTIN(wmq->wmq_itr,
   5220 					    EITR_COUNTER_MASK_82575);
   5221 				} else {
   5222 					eitr |= EITR_CNT_INGR;
   5223 				}
   5224 
   5225 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5226 				    eitr);
   5227 			}
   5228 			/*
    5229 			 * Link interrupts occur much less often than TX
    5230 			 * and RX interrupts, so we don't tune the
    5231 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5232 			 * FreeBSD's if_igb does.
   5233 			 */
   5234 		} else if (sc->sc_type == WM_T_82574 && sc->sc_nintrs > 1) {
   5235 			/*
    5236 			 * The 82574 has both ITR and EITR.  Set EITR when we
    5237 			 * use the multiqueue function with MSI-X.
   5238 			 */
   5239 			for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5240 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5241 				CSR_WRITE(sc,
   5242 				    WMREG_EITR_82574(wmq->wmq_intr_idx),
   5243 				    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5244 			}
   5245 		} else
   5246 			CSR_WRITE(sc, WMREG_ITR, sc->sc_queue[0].wmq_itr);
   5247 	}
   5248 
   5249 	/* Set the VLAN ethernetype. */
   5250 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5251 
   5252 	/*
   5253 	 * Set up the transmit control register; we start out with
    5254 	 * a collision distance suitable for FDX, but update it when
   5255 	 * we resolve the media type.
   5256 	 */
   5257 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5258 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5259 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5260 	if (sc->sc_type >= WM_T_82571)
   5261 		sc->sc_tctl |= TCTL_MULR;
   5262 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5263 
   5264 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5265 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   5266 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5267 	}
   5268 
   5269 	if (sc->sc_type == WM_T_80003) {
   5270 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5271 		reg &= ~TCTL_EXT_GCEX_MASK;
   5272 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5273 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5274 	}
   5275 
   5276 	/* Set the media. */
   5277 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5278 		goto out;
   5279 
   5280 	/* Configure for OS presence */
   5281 	wm_init_manageability(sc);
   5282 
   5283 	/*
   5284 	 * Set up the receive control register; we actually program
   5285 	 * the register when we set the receive filter.  Use multicast
   5286 	 * address offset type 0.
   5287 	 *
   5288 	 * Only the i82544 has the ability to strip the incoming
   5289 	 * CRC, so we don't enable that feature.
   5290 	 */
   5291 	sc->sc_mchash_type = 0;
   5292 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5293 	    | RCTL_MO(sc->sc_mchash_type);
   5294 
   5295 	/*
    5296 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5297 	 */
   5298 	if (sc->sc_type == WM_T_82574)
   5299 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5300 
   5301 	/*
   5302 	 * The I350 has a bug where it always strips the CRC whether
    5303 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   5304 	 */
   5305 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5306 	    || (sc->sc_type == WM_T_I210))
   5307 		sc->sc_rctl |= RCTL_SECRC;
   5308 
   5309 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5310 	    && (ifp->if_mtu > ETHERMTU)) {
   5311 		sc->sc_rctl |= RCTL_LPE;
   5312 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5313 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5314 	}
   5315 
   5316 	if (MCLBYTES == 2048) {
   5317 		sc->sc_rctl |= RCTL_2k;
   5318 	} else {
   5319 		if (sc->sc_type >= WM_T_82543) {
   5320 			switch (MCLBYTES) {
   5321 			case 4096:
   5322 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5323 				break;
   5324 			case 8192:
   5325 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5326 				break;
   5327 			case 16384:
   5328 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5329 				break;
   5330 			default:
   5331 				panic("wm_init: MCLBYTES %d unsupported",
   5332 				    MCLBYTES);
   5333 				break;
   5334 			}
   5335 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5336 	}
   5337 
   5338 	/* Set the receive filter. */
   5339 	wm_set_filter(sc);
   5340 
   5341 	/* Enable ECC */
   5342 	switch (sc->sc_type) {
   5343 	case WM_T_82571:
   5344 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5345 		reg |= PBA_ECC_CORR_EN;
   5346 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5347 		break;
   5348 	case WM_T_PCH_LPT:
   5349 	case WM_T_PCH_SPT:
   5350 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5351 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5352 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5353 
   5354 		sc->sc_ctrl |= CTRL_MEHE;
   5355 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5356 		break;
   5357 	default:
   5358 		break;
   5359 	}
   5360 
   5361 	/* On 575 and later set RDT only if RX enabled */
   5362 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5363 		int qidx;
   5364 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5365 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5366 			for (i = 0; i < WM_NRXDESC; i++) {
   5367 				mutex_enter(rxq->rxq_lock);
   5368 				wm_init_rxdesc(rxq, i);
   5369 				mutex_exit(rxq->rxq_lock);
   5370 
   5371 			}
   5372 		}
   5373 	}
   5374 
   5375 	wm_turnon(sc);
   5376 
   5377 	/* Start the one second link check clock. */
   5378 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5379 
   5380 	/* ...all done! */
   5381 	ifp->if_flags |= IFF_RUNNING;
   5382 	ifp->if_flags &= ~IFF_OACTIVE;
   5383 
   5384  out:
   5385 	sc->sc_if_flags = ifp->if_flags;
   5386 	if (error)
   5387 		log(LOG_ERR, "%s: interface not running\n",
   5388 		    device_xname(sc->sc_dev));
   5389 	return error;
   5390 }
   5391 
   5392 /*
   5393  * wm_stop:		[ifnet interface function]
   5394  *
   5395  *	Stop transmission on the interface.
   5396  */
   5397 static void
   5398 wm_stop(struct ifnet *ifp, int disable)
   5399 {
   5400 	struct wm_softc *sc = ifp->if_softc;
   5401 
   5402 	WM_CORE_LOCK(sc);
   5403 	wm_stop_locked(ifp, disable);
   5404 	WM_CORE_UNLOCK(sc);
   5405 }
   5406 
   5407 static void
   5408 wm_stop_locked(struct ifnet *ifp, int disable)
   5409 {
   5410 	struct wm_softc *sc = ifp->if_softc;
   5411 	struct wm_txsoft *txs;
   5412 	int i, qidx;
   5413 
   5414 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5415 		device_xname(sc->sc_dev), __func__));
   5416 	KASSERT(WM_CORE_LOCKED(sc));
   5417 
   5418 	wm_turnoff(sc);
   5419 
   5420 	/* Stop the one second clock. */
   5421 	callout_stop(&sc->sc_tick_ch);
   5422 
   5423 	/* Stop the 82547 Tx FIFO stall check timer. */
   5424 	if (sc->sc_type == WM_T_82547)
   5425 		callout_stop(&sc->sc_txfifo_ch);
   5426 
   5427 	if (sc->sc_flags & WM_F_HAS_MII) {
   5428 		/* Down the MII. */
   5429 		mii_down(&sc->sc_mii);
   5430 	} else {
   5431 #if 0
   5432 		/* Should we clear PHY's status properly? */
   5433 		wm_reset(sc);
   5434 #endif
   5435 	}
   5436 
   5437 	/* Stop the transmit and receive processes. */
   5438 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5439 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5440 	sc->sc_rctl &= ~RCTL_EN;
   5441 
   5442 	/*
   5443 	 * Clear the interrupt mask to ensure the device cannot assert its
   5444 	 * interrupt line.
   5445 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5446 	 * service any currently pending or shared interrupt.
   5447 	 */
   5448 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5449 	sc->sc_icr = 0;
   5450 	if (sc->sc_nintrs > 1) {
   5451 		if (sc->sc_type != WM_T_82574) {
   5452 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5453 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5454 		} else
   5455 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5456 	}
   5457 
   5458 	/* Release any queued transmit buffers. */
   5459 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5460 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5461 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5462 		mutex_enter(txq->txq_lock);
   5463 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5464 			txs = &txq->txq_soft[i];
   5465 			if (txs->txs_mbuf != NULL) {
   5466 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5467 				m_freem(txs->txs_mbuf);
   5468 				txs->txs_mbuf = NULL;
   5469 			}
   5470 		}
   5471 		mutex_exit(txq->txq_lock);
   5472 	}
   5473 
   5474 	/* Mark the interface as down and cancel the watchdog timer. */
   5475 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5476 	ifp->if_timer = 0;
   5477 
   5478 	if (disable) {
   5479 		for (i = 0; i < sc->sc_nqueues; i++) {
   5480 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5481 			mutex_enter(rxq->rxq_lock);
   5482 			wm_rxdrain(rxq);
   5483 			mutex_exit(rxq->rxq_lock);
   5484 		}
   5485 	}
   5486 
   5487 #if 0 /* notyet */
   5488 	if (sc->sc_type >= WM_T_82544)
   5489 		CSR_WRITE(sc, WMREG_WUC, 0);
   5490 #endif
   5491 }
   5492 
   5493 static void
   5494 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5495 {
   5496 	struct mbuf *m;
   5497 	int i;
   5498 
   5499 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5500 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5501 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5502 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5503 		    m->m_data, m->m_len, m->m_flags);
   5504 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5505 	    i, i == 1 ? "" : "s");
   5506 }
   5507 
   5508 /*
   5509  * wm_82547_txfifo_stall:
   5510  *
   5511  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5512  *	reset the FIFO pointers, and restart packet transmission.
   5513  */
   5514 static void
   5515 wm_82547_txfifo_stall(void *arg)
   5516 {
   5517 	struct wm_softc *sc = arg;
   5518 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5519 
   5520 	mutex_enter(txq->txq_lock);
   5521 
   5522 	if (txq->txq_stopping)
   5523 		goto out;
   5524 
   5525 	if (txq->txq_fifo_stall) {
   5526 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5527 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5528 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5529 			/*
   5530 			 * Packets have drained.  Stop transmitter, reset
   5531 			 * FIFO pointers, restart transmitter, and kick
   5532 			 * the packet queue.
   5533 			 */
   5534 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5535 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5536 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5537 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5538 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5539 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5540 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5541 			CSR_WRITE_FLUSH(sc);
   5542 
   5543 			txq->txq_fifo_head = 0;
   5544 			txq->txq_fifo_stall = 0;
   5545 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5546 		} else {
   5547 			/*
   5548 			 * Still waiting for packets to drain; try again in
   5549 			 * another tick.
   5550 			 */
   5551 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5552 		}
   5553 	}
   5554 
   5555 out:
   5556 	mutex_exit(txq->txq_lock);
   5557 }
   5558 
   5559 /*
   5560  * wm_82547_txfifo_bugchk:
   5561  *
   5562  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5563  *	prevent enqueueing a packet that would wrap around the end
    5564  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5565  *
   5566  *	We do this by checking the amount of space before the end
   5567  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5568  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5569  *	the internal FIFO pointers to the beginning, and restart
   5570  *	transmission on the interface.
   5571  */
   5572 #define	WM_FIFO_HDR		0x10
   5573 #define	WM_82547_PAD_LEN	0x3e0
   5574 static int
   5575 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5576 {
   5577 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5578 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5579 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5580 
   5581 	/* Just return if already stalled. */
   5582 	if (txq->txq_fifo_stall)
   5583 		return 1;
   5584 
   5585 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5586 		/* Stall only occurs in half-duplex mode. */
   5587 		goto send_packet;
   5588 	}
   5589 
   5590 	if (len >= WM_82547_PAD_LEN + space) {
   5591 		txq->txq_fifo_stall = 1;
   5592 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5593 		return 1;
   5594 	}
   5595 
   5596  send_packet:
   5597 	txq->txq_fifo_head += len;
   5598 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5599 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5600 
   5601 	return 0;
   5602 }
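
         /*
          * Worked example of the check above (illustrative numbers, in
          * half-duplex mode): with txq_fifo_size = 0x2000 and
          * txq_fifo_head = 0x1e00, space = 0x200.  A 1500-byte packet
          * rounds up to len = 0x5f0, which is >= WM_82547_PAD_LEN + space
          * = 0x5e0, so the queue stalls until the FIFO drains rather than
          * letting the packet wrap.
          */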
   5603 
   5604 static int
   5605 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5606 {
   5607 	int error;
   5608 
   5609 	/*
   5610 	 * Allocate the control data structures, and create and load the
   5611 	 * DMA map for it.
   5612 	 *
   5613 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5614 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5615 	 * both sets within the same 4G segment.
   5616 	 */
   5617 	if (sc->sc_type < WM_T_82544)
   5618 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5619 	else
   5620 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5621 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5622 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5623 	else
   5624 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5625 
   5626 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5627 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5628 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5629 		aprint_error_dev(sc->sc_dev,
   5630 		    "unable to allocate TX control data, error = %d\n",
   5631 		    error);
   5632 		goto fail_0;
   5633 	}
   5634 
   5635 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5636 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5637 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5638 		aprint_error_dev(sc->sc_dev,
   5639 		    "unable to map TX control data, error = %d\n", error);
   5640 		goto fail_1;
   5641 	}
   5642 
   5643 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5644 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5645 		aprint_error_dev(sc->sc_dev,
   5646 		    "unable to create TX control data DMA map, error = %d\n",
   5647 		    error);
   5648 		goto fail_2;
   5649 	}
   5650 
   5651 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5652 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5653 		aprint_error_dev(sc->sc_dev,
   5654 		    "unable to load TX control data DMA map, error = %d\n",
   5655 		    error);
   5656 		goto fail_3;
   5657 	}
   5658 
   5659 	return 0;
   5660 
   5661  fail_3:
   5662 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5663  fail_2:
   5664 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5665 	    WM_TXDESCS_SIZE(txq));
   5666  fail_1:
   5667 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5668  fail_0:
   5669 	return error;
   5670 }
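
         /*
          * Note on the allocation above: the (bus_size_t)0x100000000ULL
          * boundary argument to bus_dmamem_alloc() is what enforces the
          * same-4G-segment NOTE, by preventing the allocation from
          * crossing a 4 GB boundary.
          */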
   5671 
   5672 static void
   5673 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5674 {
   5675 
   5676 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5677 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5678 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5679 	    WM_TXDESCS_SIZE(txq));
   5680 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5681 }
   5682 
   5683 static int
   5684 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5685 {
   5686 	int error;
   5687 	size_t rxq_descs_size;
   5688 
   5689 	/*
   5690 	 * Allocate the control data structures, and create and load the
   5691 	 * DMA map for it.
   5692 	 *
   5693 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5694 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5695 	 * both sets within the same 4G segment.
   5696 	 */
   5697 	rxq->rxq_ndesc = WM_NRXDESC;
   5698 	if (sc->sc_type == WM_T_82574)
   5699 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5700 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5701 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5702 	else
   5703 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5704 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5705 
   5706 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5707 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5708 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5709 		aprint_error_dev(sc->sc_dev,
   5710 		    "unable to allocate RX control data, error = %d\n",
   5711 		    error);
   5712 		goto fail_0;
   5713 	}
   5714 
   5715 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5716 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5717 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5718 		aprint_error_dev(sc->sc_dev,
   5719 		    "unable to map RX control data, error = %d\n", error);
   5720 		goto fail_1;
   5721 	}
   5722 
   5723 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5724 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5725 		aprint_error_dev(sc->sc_dev,
   5726 		    "unable to create RX control data DMA map, error = %d\n",
   5727 		    error);
   5728 		goto fail_2;
   5729 	}
   5730 
   5731 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5732 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5733 		aprint_error_dev(sc->sc_dev,
   5734 		    "unable to load RX control data DMA map, error = %d\n",
   5735 		    error);
   5736 		goto fail_3;
   5737 	}
   5738 
   5739 	return 0;
   5740 
   5741  fail_3:
   5742 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5743  fail_2:
   5744 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5745 	    rxq_descs_size);
   5746  fail_1:
   5747 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5748  fail_0:
   5749 	return error;
   5750 }
   5751 
   5752 static void
   5753 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5754 {
   5755 
   5756 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5757 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5758 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5759 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5760 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5761 }
   5762 
   5763 
   5764 static int
   5765 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5766 {
   5767 	int i, error;
   5768 
   5769 	/* Create the transmit buffer DMA maps. */
   5770 	WM_TXQUEUELEN(txq) =
   5771 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5772 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5773 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5774 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5775 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5776 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5777 			aprint_error_dev(sc->sc_dev,
   5778 			    "unable to create Tx DMA map %d, error = %d\n",
   5779 			    i, error);
   5780 			goto fail;
   5781 		}
   5782 	}
   5783 
   5784 	return 0;
   5785 
   5786  fail:
   5787 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5788 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5789 			bus_dmamap_destroy(sc->sc_dmat,
   5790 			    txq->txq_soft[i].txs_dmamap);
   5791 	}
   5792 	return error;
   5793 }
   5794 
   5795 static void
   5796 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5797 {
   5798 	int i;
   5799 
   5800 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5801 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5802 			bus_dmamap_destroy(sc->sc_dmat,
   5803 			    txq->txq_soft[i].txs_dmamap);
   5804 	}
   5805 }
   5806 
   5807 static int
   5808 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5809 {
   5810 	int i, error;
   5811 
   5812 	/* Create the receive buffer DMA maps. */
   5813 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5814 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5815 			    MCLBYTES, 0, 0,
   5816 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5817 			aprint_error_dev(sc->sc_dev,
   5818 			    "unable to create Rx DMA map %d error = %d\n",
   5819 			    i, error);
   5820 			goto fail;
   5821 		}
   5822 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5823 	}
   5824 
   5825 	return 0;
   5826 
   5827  fail:
   5828 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5829 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5830 			bus_dmamap_destroy(sc->sc_dmat,
   5831 			    rxq->rxq_soft[i].rxs_dmamap);
   5832 	}
   5833 	return error;
   5834 }
   5835 
   5836 static void
   5837 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5838 {
   5839 	int i;
   5840 
   5841 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5842 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5843 			bus_dmamap_destroy(sc->sc_dmat,
   5844 			    rxq->rxq_soft[i].rxs_dmamap);
   5845 	}
   5846 }
   5847 
   5848 /*
    5849  * wm_alloc_txrx_queues:
   5850  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5851  */
   5852 static int
   5853 wm_alloc_txrx_queues(struct wm_softc *sc)
   5854 {
   5855 	int i, error, tx_done, rx_done;
   5856 
   5857 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5858 	    KM_SLEEP);
   5859 	if (sc->sc_queue == NULL) {
    5860 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5861 		error = ENOMEM;
   5862 		goto fail_0;
   5863 	}
   5864 
   5865 	/*
   5866 	 * For transmission
   5867 	 */
   5868 	error = 0;
   5869 	tx_done = 0;
   5870 	for (i = 0; i < sc->sc_nqueues; i++) {
   5871 #ifdef WM_EVENT_COUNTERS
   5872 		int j;
   5873 		const char *xname;
   5874 #endif
   5875 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5876 		txq->txq_sc = sc;
   5877 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5878 
   5879 		error = wm_alloc_tx_descs(sc, txq);
   5880 		if (error)
   5881 			break;
   5882 		error = wm_alloc_tx_buffer(sc, txq);
   5883 		if (error) {
   5884 			wm_free_tx_descs(sc, txq);
   5885 			break;
   5886 		}
   5887 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5888 		if (txq->txq_interq == NULL) {
   5889 			wm_free_tx_descs(sc, txq);
   5890 			wm_free_tx_buffer(sc, txq);
   5891 			error = ENOMEM;
   5892 			break;
   5893 		}
   5894 
   5895 #ifdef WM_EVENT_COUNTERS
   5896 		xname = device_xname(sc->sc_dev);
   5897 
   5898 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5899 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5900 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5901 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5902 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5903 
   5904 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5905 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5906 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5907 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5908 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5909 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5910 
   5911 		for (j = 0; j < WM_NTXSEGS; j++) {
   5912 			snprintf(txq->txq_txseg_evcnt_names[j],
   5913 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   5914 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5915 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5916 		}
   5917 
   5918 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5919 
   5920 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5921 #endif /* WM_EVENT_COUNTERS */
   5922 
   5923 		tx_done++;
   5924 	}
   5925 	if (error)
   5926 		goto fail_1;
   5927 
   5928 	/*
    5929 	 * For receive
   5930 	 */
   5931 	error = 0;
   5932 	rx_done = 0;
   5933 	for (i = 0; i < sc->sc_nqueues; i++) {
   5934 #ifdef WM_EVENT_COUNTERS
   5935 		const char *xname;
   5936 #endif
   5937 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5938 		rxq->rxq_sc = sc;
   5939 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5940 
   5941 		error = wm_alloc_rx_descs(sc, rxq);
   5942 		if (error)
   5943 			break;
   5944 
   5945 		error = wm_alloc_rx_buffer(sc, rxq);
   5946 		if (error) {
   5947 			wm_free_rx_descs(sc, rxq);
   5948 			break;
   5949 		}
   5950 
   5951 #ifdef WM_EVENT_COUNTERS
   5952 		xname = device_xname(sc->sc_dev);
   5953 
   5954 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5955 
   5956 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5957 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5958 #endif /* WM_EVENT_COUNTERS */
   5959 
   5960 		rx_done++;
   5961 	}
   5962 	if (error)
   5963 		goto fail_2;
   5964 
   5965 	return 0;
   5966 
   5967  fail_2:
   5968 	for (i = 0; i < rx_done; i++) {
   5969 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5970 		wm_free_rx_buffer(sc, rxq);
   5971 		wm_free_rx_descs(sc, rxq);
   5972 		if (rxq->rxq_lock)
   5973 			mutex_obj_free(rxq->rxq_lock);
   5974 	}
   5975  fail_1:
   5976 	for (i = 0; i < tx_done; i++) {
   5977 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5978 		pcq_destroy(txq->txq_interq);
   5979 		wm_free_tx_buffer(sc, txq);
   5980 		wm_free_tx_descs(sc, txq);
   5981 		if (txq->txq_lock)
   5982 			mutex_obj_free(txq->txq_lock);
   5983 	}
   5984 
   5985 	kmem_free(sc->sc_queue,
   5986 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5987  fail_0:
   5988 	return error;
   5989 }
   5990 
   5991 /*
   5992  * wm_free_quques:
   5993  *	Free {tx,rx}descs and {tx,rx} buffers
   5994  */
   5995 static void
   5996 wm_free_txrx_queues(struct wm_softc *sc)
   5997 {
   5998 	int i;
   5999 
   6000 	for (i = 0; i < sc->sc_nqueues; i++) {
   6001 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6002 
   6003 #ifdef WM_EVENT_COUNTERS
   6004 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6005 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6006 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6007 #endif /* WM_EVENT_COUNTERS */
   6008 
   6009 		wm_free_rx_buffer(sc, rxq);
   6010 		wm_free_rx_descs(sc, rxq);
   6011 		if (rxq->rxq_lock)
   6012 			mutex_obj_free(rxq->rxq_lock);
   6013 	}
   6014 
   6015 	for (i = 0; i < sc->sc_nqueues; i++) {
   6016 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6017 		struct mbuf *m;
   6018 #ifdef WM_EVENT_COUNTERS
   6019 		int j;
   6020 
   6021 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6022 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6023 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6024 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6025 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6026 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6027 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6028 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6029 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6030 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6031 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6032 
   6033 		for (j = 0; j < WM_NTXSEGS; j++)
   6034 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6035 
   6036 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6037 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6038 #endif /* WM_EVENT_COUNTERS */
   6039 
   6040 		/* drain txq_interq */
   6041 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6042 			m_freem(m);
   6043 		pcq_destroy(txq->txq_interq);
   6044 
   6045 		wm_free_tx_buffer(sc, txq);
   6046 		wm_free_tx_descs(sc, txq);
   6047 		if (txq->txq_lock)
   6048 			mutex_obj_free(txq->txq_lock);
   6049 	}
   6050 
   6051 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6052 }
   6053 
   6054 static void
   6055 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6056 {
   6057 
   6058 	KASSERT(mutex_owned(txq->txq_lock));
   6059 
   6060 	/* Initialize the transmit descriptor ring. */
   6061 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6062 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6063 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6064 	txq->txq_free = WM_NTXDESC(txq);
   6065 	txq->txq_next = 0;
   6066 }
   6067 
   6068 static void
   6069 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6070     struct wm_txqueue *txq)
   6071 {
   6072 
   6073 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6074 		device_xname(sc->sc_dev), __func__));
   6075 	KASSERT(mutex_owned(txq->txq_lock));
   6076 
   6077 	if (sc->sc_type < WM_T_82543) {
   6078 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6079 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6080 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6081 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6082 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6083 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6084 	} else {
   6085 		int qid = wmq->wmq_id;
   6086 
   6087 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6088 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6089 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6090 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6091 
   6092 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6093 			/*
   6094 			 * Don't write TDT before TCTL.EN is set.
    6095 			 * See the documentation.
   6096 			 */
   6097 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6098 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6099 			    | TXDCTL_WTHRESH(0));
   6100 		else {
   6101 			/* XXX should update with AIM? */
   6102 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6103 			if (sc->sc_type >= WM_T_82540) {
   6104 				/* should be same */
   6105 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6106 			}
   6107 
   6108 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6109 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6110 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6111 		}
   6112 	}
   6113 }
   6114 
   6115 static void
   6116 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6117 {
   6118 	int i;
   6119 
   6120 	KASSERT(mutex_owned(txq->txq_lock));
   6121 
   6122 	/* Initialize the transmit job descriptors. */
   6123 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6124 		txq->txq_soft[i].txs_mbuf = NULL;
   6125 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6126 	txq->txq_snext = 0;
   6127 	txq->txq_sdirty = 0;
   6128 }
   6129 
   6130 static void
   6131 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6132     struct wm_txqueue *txq)
   6133 {
   6134 
   6135 	KASSERT(mutex_owned(txq->txq_lock));
   6136 
   6137 	/*
   6138 	 * Set up some register offsets that are different between
   6139 	 * the i82542 and the i82543 and later chips.
   6140 	 */
   6141 	if (sc->sc_type < WM_T_82543)
   6142 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6143 	else
   6144 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6145 
   6146 	wm_init_tx_descs(sc, txq);
   6147 	wm_init_tx_regs(sc, wmq, txq);
   6148 	wm_init_tx_buffer(sc, txq);
   6149 }
   6150 
   6151 static void
   6152 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6153     struct wm_rxqueue *rxq)
   6154 {
   6155 
   6156 	KASSERT(mutex_owned(rxq->rxq_lock));
   6157 
   6158 	/*
   6159 	 * Initialize the receive descriptor and receive job
   6160 	 * descriptor rings.
   6161 	 */
   6162 	if (sc->sc_type < WM_T_82543) {
   6163 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6164 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6165 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6166 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6167 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6168 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6169 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6170 
   6171 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6172 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6173 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6174 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6175 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6176 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6177 	} else {
   6178 		int qid = wmq->wmq_id;
   6179 
   6180 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6181 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6182 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6183 
   6184 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6185 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6186 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6187 
    6188 			/* Currently, we support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   6189 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6190 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
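
         			/*
         			 * Example (assuming SRRCTL_BSIZEPKT_SHIFT is 10,
         			 * i.e. the BSIZEPKT field is in 1 KB units): with
         			 * the usual MCLBYTES = 2048 the field is set to 2,
         			 * telling the chip each receive buffer holds 2 KB.
         			 */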
   6191 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6192 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6193 			    | RXDCTL_WTHRESH(1));
   6194 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6195 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6196 		} else {
   6197 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6198 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6199 			/* XXX should update with AIM? */
   6200 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6201 			/* MUST be same */
   6202 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6203 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6204 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6205 		}
   6206 	}
   6207 }
   6208 
   6209 static int
   6210 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6211 {
   6212 	struct wm_rxsoft *rxs;
   6213 	int error, i;
   6214 
   6215 	KASSERT(mutex_owned(rxq->rxq_lock));
   6216 
   6217 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6218 		rxs = &rxq->rxq_soft[i];
   6219 		if (rxs->rxs_mbuf == NULL) {
   6220 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6221 				log(LOG_ERR, "%s: unable to allocate or map "
   6222 				    "rx buffer %d, error = %d\n",
   6223 				    device_xname(sc->sc_dev), i, error);
   6224 				/*
   6225 				 * XXX Should attempt to run with fewer receive
   6226 				 * XXX buffers instead of just failing.
   6227 				 */
   6228 				wm_rxdrain(rxq);
   6229 				return ENOMEM;
   6230 			}
   6231 		} else {
   6232 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6233 				wm_init_rxdesc(rxq, i);
   6234 			/*
    6235 			 * For the 82575 and newer devices, the RX descriptors
    6236 			 * must be initialized after RCTL.EN is set in
    6237 			 * wm_set_filter().
   6238 			 */
   6239 		}
   6240 	}
   6241 	rxq->rxq_ptr = 0;
   6242 	rxq->rxq_discard = 0;
   6243 	WM_RXCHAIN_RESET(rxq);
   6244 
   6245 	return 0;
   6246 }
   6247 
   6248 static int
   6249 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6250     struct wm_rxqueue *rxq)
   6251 {
   6252 
   6253 	KASSERT(mutex_owned(rxq->rxq_lock));
   6254 
   6255 	/*
   6256 	 * Set up some register offsets that are different between
   6257 	 * the i82542 and the i82543 and later chips.
   6258 	 */
   6259 	if (sc->sc_type < WM_T_82543)
   6260 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6261 	else
   6262 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6263 
   6264 	wm_init_rx_regs(sc, wmq, rxq);
   6265 	return wm_init_rx_buffer(sc, rxq);
   6266 }
   6267 
   6268 /*
    6269  * wm_init_txrx_queues:
    6270  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   6271  */
   6272 static int
   6273 wm_init_txrx_queues(struct wm_softc *sc)
   6274 {
   6275 	int i, error = 0;
   6276 
   6277 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6278 		device_xname(sc->sc_dev), __func__));
   6279 
   6280 	for (i = 0; i < sc->sc_nqueues; i++) {
   6281 		struct wm_queue *wmq = &sc->sc_queue[i];
   6282 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6283 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6284 
   6285 		wmq->wmq_itr = sc->sc_itr_init;
   6286 
   6287 		mutex_enter(txq->txq_lock);
   6288 		wm_init_tx_queue(sc, wmq, txq);
   6289 		mutex_exit(txq->txq_lock);
   6290 
   6291 		mutex_enter(rxq->rxq_lock);
   6292 		error = wm_init_rx_queue(sc, wmq, rxq);
   6293 		mutex_exit(rxq->rxq_lock);
   6294 		if (error)
   6295 			break;
   6296 	}
   6297 
   6298 	return error;
   6299 }
   6300 
   6301 /*
   6302  * wm_tx_offload:
   6303  *
   6304  *	Set up TCP/IP checksumming parameters for the
   6305  *	specified packet.
   6306  */
   6307 static int
   6308 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6309     uint8_t *fieldsp)
   6310 {
   6311 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6312 	struct mbuf *m0 = txs->txs_mbuf;
   6313 	struct livengood_tcpip_ctxdesc *t;
   6314 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6315 	uint32_t ipcse;
   6316 	struct ether_header *eh;
   6317 	int offset, iphl;
   6318 	uint8_t fields;
   6319 
   6320 	/*
   6321 	 * XXX It would be nice if the mbuf pkthdr had offset
   6322 	 * fields for the protocol headers.
   6323 	 */
   6324 
   6325 	eh = mtod(m0, struct ether_header *);
   6326 	switch (htons(eh->ether_type)) {
   6327 	case ETHERTYPE_IP:
   6328 	case ETHERTYPE_IPV6:
   6329 		offset = ETHER_HDR_LEN;
   6330 		break;
   6331 
   6332 	case ETHERTYPE_VLAN:
   6333 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6334 		break;
   6335 
   6336 	default:
   6337 		/*
   6338 		 * Don't support this protocol or encapsulation.
   6339 		 */
   6340 		*fieldsp = 0;
   6341 		*cmdp = 0;
   6342 		return 0;
   6343 	}
   6344 
   6345 	if ((m0->m_pkthdr.csum_flags &
   6346 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6347 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6348 	} else {
   6349 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6350 	}
   6351 	ipcse = offset + iphl - 1;
   6352 
   6353 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6354 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6355 	seg = 0;
   6356 	fields = 0;
   6357 
   6358 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6359 		int hlen = offset + iphl;
   6360 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6361 
   6362 		if (__predict_false(m0->m_len <
   6363 				    (hlen + sizeof(struct tcphdr)))) {
   6364 			/*
   6365 			 * TCP/IP headers are not in the first mbuf; we need
   6366 			 * to do this the slow and painful way.  Let's just
   6367 			 * hope this doesn't happen very often.
   6368 			 */
   6369 			struct tcphdr th;
   6370 
   6371 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6372 
   6373 			m_copydata(m0, hlen, sizeof(th), &th);
   6374 			if (v4) {
   6375 				struct ip ip;
   6376 
   6377 				m_copydata(m0, offset, sizeof(ip), &ip);
   6378 				ip.ip_len = 0;
   6379 				m_copyback(m0,
   6380 				    offset + offsetof(struct ip, ip_len),
   6381 				    sizeof(ip.ip_len), &ip.ip_len);
   6382 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6383 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6384 			} else {
   6385 				struct ip6_hdr ip6;
   6386 
   6387 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6388 				ip6.ip6_plen = 0;
   6389 				m_copyback(m0,
   6390 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6391 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6392 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6393 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6394 			}
   6395 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6396 			    sizeof(th.th_sum), &th.th_sum);
   6397 
   6398 			hlen += th.th_off << 2;
   6399 		} else {
   6400 			/*
   6401 			 * TCP/IP headers are in the first mbuf; we can do
   6402 			 * this the easy way.
   6403 			 */
   6404 			struct tcphdr *th;
   6405 
   6406 			if (v4) {
   6407 				struct ip *ip =
   6408 				    (void *)(mtod(m0, char *) + offset);
   6409 				th = (void *)(mtod(m0, char *) + hlen);
   6410 
   6411 				ip->ip_len = 0;
   6412 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6413 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6414 			} else {
   6415 				struct ip6_hdr *ip6 =
   6416 				    (void *)(mtod(m0, char *) + offset);
   6417 				th = (void *)(mtod(m0, char *) + hlen);
   6418 
   6419 				ip6->ip6_plen = 0;
   6420 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6421 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6422 			}
   6423 			hlen += th->th_off << 2;
   6424 		}
   6425 
   6426 		if (v4) {
   6427 			WM_Q_EVCNT_INCR(txq, txtso);
   6428 			cmdlen |= WTX_TCPIP_CMD_IP;
   6429 		} else {
   6430 			WM_Q_EVCNT_INCR(txq, txtso6);
   6431 			ipcse = 0;
   6432 		}
   6433 		cmd |= WTX_TCPIP_CMD_TSE;
   6434 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6435 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6436 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6437 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6438 	}
   6439 
   6440 	/*
   6441 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6442 	 * offload feature, if we load the context descriptor, we
   6443 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6444 	 */
   6445 
   6446 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6447 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6448 	    WTX_TCPIP_IPCSE(ipcse);
   6449 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6450 		WM_Q_EVCNT_INCR(txq, txipsum);
   6451 		fields |= WTX_IXSM;
   6452 	}
   6453 
   6454 	offset += iphl;
   6455 
   6456 	if (m0->m_pkthdr.csum_flags &
   6457 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6458 		WM_Q_EVCNT_INCR(txq, txtusum);
   6459 		fields |= WTX_TXSM;
   6460 		tucs = WTX_TCPIP_TUCSS(offset) |
   6461 		    WTX_TCPIP_TUCSO(offset +
   6462 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6463 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6464 	} else if ((m0->m_pkthdr.csum_flags &
   6465 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6466 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6467 		fields |= WTX_TXSM;
   6468 		tucs = WTX_TCPIP_TUCSS(offset) |
   6469 		    WTX_TCPIP_TUCSO(offset +
   6470 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6471 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6472 	} else {
   6473 		/* Just initialize it to a valid TCP context. */
   6474 		tucs = WTX_TCPIP_TUCSS(offset) |
   6475 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6476 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6477 	}
   6478 
   6479 	/* Fill in the context descriptor. */
   6480 	t = (struct livengood_tcpip_ctxdesc *)
   6481 	    &txq->txq_descs[txq->txq_next];
   6482 	t->tcpip_ipcs = htole32(ipcs);
   6483 	t->tcpip_tucs = htole32(tucs);
   6484 	t->tcpip_cmdlen = htole32(cmdlen);
   6485 	t->tcpip_seg = htole32(seg);
   6486 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6487 
   6488 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6489 	txs->txs_ndesc++;
   6490 
   6491 	*cmdp = cmd;
   6492 	*fieldsp = fields;
   6493 
   6494 	return 0;
   6495 }
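
         /*
          * Worked example (illustrative, not driver code): for an untagged
          * IPv4/TCP packet with a 20-byte IP header, wm_tx_offload() above
          * computes IPCSS = 14 (ETHER_HDR_LEN), IPCSO = 14 + 10 = 24
          * (offsetof(struct ip, ip_sum)), IPCSE = 14 + 20 - 1 = 33,
          * TUCSS = 34, TUCSO = 34 + 16 = 50 (offsetof(struct tcphdr,
          * th_sum)) and TUCSE = 0 (checksum to the end of the packet).
          */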
   6496 
   6497 static inline int
   6498 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6499 {
   6500 	struct wm_softc *sc = ifp->if_softc;
   6501 	u_int cpuid = cpu_index(curcpu());
   6502 
   6503 	/*
    6504 	 * Currently, we use a simple distribution strategy.
    6505 	 * TODO:
    6506 	 * distribute by flowid (RSS hash value).
    6507 	 */
    6508 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6509 }
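
         /*
          * Worked example (assumed values): with ncpu = 4,
          * sc_affinity_offset = 1 and sc_nqueues = 2, a caller running on
          * CPU 2 maps to queue (2 + 4 - 1) % 2 = 1, so consecutive CPUs
          * alternate between the two queues.
          */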
   6510 
   6511 /*
   6512  * wm_start:		[ifnet interface function]
   6513  *
   6514  *	Start packet transmission on the interface.
   6515  */
   6516 static void
   6517 wm_start(struct ifnet *ifp)
   6518 {
   6519 	struct wm_softc *sc = ifp->if_softc;
   6520 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6521 
   6522 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6523 
   6524 	/*
   6525 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6526 	 */
   6527 
   6528 	mutex_enter(txq->txq_lock);
   6529 	if (!txq->txq_stopping)
   6530 		wm_start_locked(ifp);
   6531 	mutex_exit(txq->txq_lock);
   6532 }
   6533 
   6534 static void
   6535 wm_start_locked(struct ifnet *ifp)
   6536 {
   6537 	struct wm_softc *sc = ifp->if_softc;
   6538 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6539 
   6540 	wm_send_common_locked(ifp, txq, false);
   6541 }
   6542 
   6543 static int
   6544 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6545 {
   6546 	int qid;
   6547 	struct wm_softc *sc = ifp->if_softc;
   6548 	struct wm_txqueue *txq;
   6549 
   6550 	qid = wm_select_txqueue(ifp, m);
   6551 	txq = &sc->sc_queue[qid].wmq_txq;
   6552 
   6553 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6554 		m_freem(m);
   6555 		WM_Q_EVCNT_INCR(txq, txdrop);
   6556 		return ENOBUFS;
   6557 	}
   6558 
   6559 	/*
   6560 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6561 	 */
   6562 	ifp->if_obytes += m->m_pkthdr.len;
   6563 	if (m->m_flags & M_MCAST)
   6564 		ifp->if_omcasts++;
   6565 
   6566 	if (mutex_tryenter(txq->txq_lock)) {
   6567 		if (!txq->txq_stopping)
   6568 			wm_transmit_locked(ifp, txq);
   6569 		mutex_exit(txq->txq_lock);
   6570 	}
   6571 
   6572 	return 0;
   6573 }
   6574 
   6575 static void
   6576 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6577 {
   6578 
   6579 	wm_send_common_locked(ifp, txq, true);
   6580 }
   6581 
   6582 static void
   6583 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6584     bool is_transmit)
   6585 {
   6586 	struct wm_softc *sc = ifp->if_softc;
   6587 	struct mbuf *m0;
   6588 	struct m_tag *mtag;
   6589 	struct wm_txsoft *txs;
   6590 	bus_dmamap_t dmamap;
   6591 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6592 	bus_addr_t curaddr;
   6593 	bus_size_t seglen, curlen;
   6594 	uint32_t cksumcmd;
   6595 	uint8_t cksumfields;
   6596 
   6597 	KASSERT(mutex_owned(txq->txq_lock));
   6598 
   6599 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6600 		return;
   6601 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6602 		return;
   6603 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6604 		return;
   6605 
   6606 	/* Remember the previous number of free descriptors. */
   6607 	ofree = txq->txq_free;
   6608 
   6609 	/*
   6610 	 * Loop through the send queue, setting up transmit descriptors
   6611 	 * until we drain the queue, or use up all available transmit
   6612 	 * descriptors.
   6613 	 */
   6614 	for (;;) {
   6615 		m0 = NULL;
   6616 
   6617 		/* Get a work queue entry. */
   6618 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6619 			wm_txeof(sc, txq);
   6620 			if (txq->txq_sfree == 0) {
   6621 				DPRINTF(WM_DEBUG_TX,
   6622 				    ("%s: TX: no free job descriptors\n",
   6623 					device_xname(sc->sc_dev)));
   6624 				WM_Q_EVCNT_INCR(txq, txsstall);
   6625 				break;
   6626 			}
   6627 		}
   6628 
   6629 		/* Grab a packet off the queue. */
   6630 		if (is_transmit)
   6631 			m0 = pcq_get(txq->txq_interq);
   6632 		else
   6633 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6634 		if (m0 == NULL)
   6635 			break;
   6636 
   6637 		DPRINTF(WM_DEBUG_TX,
   6638 		    ("%s: TX: have packet to transmit: %p\n",
   6639 		    device_xname(sc->sc_dev), m0));
   6640 
   6641 		txs = &txq->txq_soft[txq->txq_snext];
   6642 		dmamap = txs->txs_dmamap;
   6643 
   6644 		use_tso = (m0->m_pkthdr.csum_flags &
   6645 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6646 
   6647 		/*
   6648 		 * So says the Linux driver:
   6649 		 * The controller does a simple calculation to make sure
   6650 		 * there is enough room in the FIFO before initiating the
   6651 		 * DMA for each buffer.  The calc is:
   6652 		 *	4 = ceil(buffer len / MSS)
   6653 		 * To make sure we don't overrun the FIFO, adjust the max
   6654 		 * buffer len if the MSS drops.
   6655 		 */
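         		/*
         		 * Example (illustrative): with an MSS of 1448 the cap
         		 * below is 1448 << 2 = 5792 bytes per DMA segment;
         		 * once 4 * MSS reaches WTX_MAX_LEN the cap stays at
         		 * WTX_MAX_LEN.
         		 */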
   6656 		dmamap->dm_maxsegsz =
   6657 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6658 		    ? m0->m_pkthdr.segsz << 2
   6659 		    : WTX_MAX_LEN;
   6660 
   6661 		/*
   6662 		 * Load the DMA map.  If this fails, the packet either
   6663 		 * didn't fit in the allotted number of segments, or we
   6664 		 * were short on resources.  For the too-many-segments
   6665 		 * case, we simply report an error and drop the packet,
   6666 		 * since we can't sanely copy a jumbo packet to a single
   6667 		 * buffer.
   6668 		 */
   6669 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6670 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6671 		if (error) {
   6672 			if (error == EFBIG) {
   6673 				WM_Q_EVCNT_INCR(txq, txdrop);
   6674 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6675 				    "DMA segments, dropping...\n",
   6676 				    device_xname(sc->sc_dev));
   6677 				wm_dump_mbuf_chain(sc, m0);
   6678 				m_freem(m0);
   6679 				continue;
   6680 			}
   6681 			/*  Short on resources, just stop for now. */
   6682 			DPRINTF(WM_DEBUG_TX,
   6683 			    ("%s: TX: dmamap load failed: %d\n",
   6684 			    device_xname(sc->sc_dev), error));
   6685 			break;
   6686 		}
   6687 
   6688 		segs_needed = dmamap->dm_nsegs;
   6689 		if (use_tso) {
   6690 			/* For sentinel descriptor; see below. */
   6691 			segs_needed++;
   6692 		}
   6693 
   6694 		/*
   6695 		 * Ensure we have enough descriptors free to describe
   6696 		 * the packet.  Note, we always reserve one descriptor
   6697 		 * at the end of the ring due to the semantics of the
   6698 		 * TDT register, plus one more in the event we need
   6699 		 * to load offload context.
   6700 		 */
   6701 		if (segs_needed > txq->txq_free - 2) {
   6702 			/*
   6703 			 * Not enough free descriptors to transmit this
   6704 			 * packet.  We haven't committed anything yet,
   6705 			 * so just unload the DMA map, put the packet
    6706 			 * back on the queue, and punt.  Notify the upper
   6707 			 * layer that there are no more slots left.
   6708 			 */
   6709 			DPRINTF(WM_DEBUG_TX,
   6710 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6711 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6712 			    segs_needed, txq->txq_free - 1));
   6713 			if (!is_transmit)
   6714 				ifp->if_flags |= IFF_OACTIVE;
   6715 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6716 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6717 			WM_Q_EVCNT_INCR(txq, txdstall);
   6718 			break;
   6719 		}
   6720 
   6721 		/*
   6722 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6723 		 * once we know we can transmit the packet, since we
   6724 		 * do some internal FIFO space accounting here.
   6725 		 */
   6726 		if (sc->sc_type == WM_T_82547 &&
   6727 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6728 			DPRINTF(WM_DEBUG_TX,
   6729 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6730 			    device_xname(sc->sc_dev)));
   6731 			if (!is_transmit)
   6732 				ifp->if_flags |= IFF_OACTIVE;
   6733 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6734 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6735 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6736 			break;
   6737 		}
   6738 
   6739 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6740 
   6741 		DPRINTF(WM_DEBUG_TX,
   6742 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6743 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6744 
   6745 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6746 
   6747 		/*
   6748 		 * Store a pointer to the packet so that we can free it
   6749 		 * later.
   6750 		 *
   6751 		 * Initially, we consider the number of descriptors the
    6752 		 * packet uses to be the number of DMA segments.  This
    6753 		 * may be incremented by 1 if we do checksum offload (a
    6754 		 * descriptor is used to set the checksum context).
   6755 		 */
   6756 		txs->txs_mbuf = m0;
   6757 		txs->txs_firstdesc = txq->txq_next;
   6758 		txs->txs_ndesc = segs_needed;
   6759 
   6760 		/* Set up offload parameters for this packet. */
   6761 		if (m0->m_pkthdr.csum_flags &
   6762 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6763 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6764 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6765 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6766 					  &cksumfields) != 0) {
   6767 				/* Error message already displayed. */
   6768 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6769 				continue;
   6770 			}
   6771 		} else {
   6772 			cksumcmd = 0;
   6773 			cksumfields = 0;
   6774 		}
   6775 
   6776 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6777 
   6778 		/* Sync the DMA map. */
   6779 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6780 		    BUS_DMASYNC_PREWRITE);
   6781 
   6782 		/* Initialize the transmit descriptor. */
   6783 		for (nexttx = txq->txq_next, seg = 0;
   6784 		     seg < dmamap->dm_nsegs; seg++) {
   6785 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6786 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6787 			     seglen != 0;
   6788 			     curaddr += curlen, seglen -= curlen,
   6789 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6790 				curlen = seglen;
   6791 
   6792 				/*
   6793 				 * So says the Linux driver:
   6794 				 * Work around for premature descriptor
   6795 				 * write-backs in TSO mode.  Append a
   6796 				 * 4-byte sentinel descriptor.
   6797 				 */
   6798 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6799 				    curlen > 8)
   6800 					curlen -= 4;
   6801 
   6802 				wm_set_dma_addr(
   6803 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6804 				txq->txq_descs[nexttx].wtx_cmdlen
   6805 				    = htole32(cksumcmd | curlen);
   6806 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6807 				    = 0;
   6808 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6809 				    = cksumfields;
    6810 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
         				    = 0;
   6811 				lasttx = nexttx;
   6812 
   6813 				DPRINTF(WM_DEBUG_TX,
   6814 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6815 				     "len %#04zx\n",
   6816 				    device_xname(sc->sc_dev), nexttx,
   6817 				    (uint64_t)curaddr, curlen));
   6818 			}
   6819 		}
   6820 
   6821 		KASSERT(lasttx != -1);
   6822 
   6823 		/*
   6824 		 * Set up the command byte on the last descriptor of
   6825 		 * the packet.  If we're in the interrupt delay window,
   6826 		 * delay the interrupt.
   6827 		 */
   6828 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6829 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6830 
   6831 		/*
   6832 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6833 		 * up the descriptor to encapsulate the packet for us.
   6834 		 *
   6835 		 * This is only valid on the last descriptor of the packet.
   6836 		 */
   6837 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6838 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6839 			    htole32(WTX_CMD_VLE);
   6840 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6841 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6842 		}
   6843 
   6844 		txs->txs_lastdesc = lasttx;
   6845 
   6846 		DPRINTF(WM_DEBUG_TX,
   6847 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6848 		    device_xname(sc->sc_dev),
   6849 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6850 
   6851 		/* Sync the descriptors we're using. */
   6852 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6853 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6854 
   6855 		/* Give the packet to the chip. */
   6856 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6857 
   6858 		DPRINTF(WM_DEBUG_TX,
   6859 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6860 
   6861 		DPRINTF(WM_DEBUG_TX,
   6862 		    ("%s: TX: finished transmitting packet, job %d\n",
   6863 		    device_xname(sc->sc_dev), txq->txq_snext));
   6864 
   6865 		/* Advance the tx pointer. */
   6866 		txq->txq_free -= txs->txs_ndesc;
   6867 		txq->txq_next = nexttx;
   6868 
   6869 		txq->txq_sfree--;
   6870 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6871 
   6872 		/* Pass the packet to any BPF listeners. */
   6873 		bpf_mtap(ifp, m0);
   6874 	}
   6875 
   6876 	if (m0 != NULL) {
   6877 		if (!is_transmit)
   6878 			ifp->if_flags |= IFF_OACTIVE;
   6879 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6880 		WM_Q_EVCNT_INCR(txq, txdrop);
   6881 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6882 			__func__));
   6883 		m_freem(m0);
   6884 	}
   6885 
   6886 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6887 		/* No more slots; notify upper layer. */
   6888 		if (!is_transmit)
   6889 			ifp->if_flags |= IFF_OACTIVE;
   6890 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   6891 	}
   6892 
   6893 	if (txq->txq_free != ofree) {
   6894 		/* Set a watchdog timer in case the chip flakes out. */
   6895 		ifp->if_timer = 5;
   6896 	}
   6897 }
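
         /*
          * Accounting note (illustrative): the "txq_free - 2" test above
          * keeps one descriptor free for the TDT ring semantics mentioned
          * in the comment and one for a possible checksum context
          * descriptor, so a packet with N DMA segments needs
          * N <= txq_free - 2 free slots.
          */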
   6898 
   6899 /*
   6900  * wm_nq_tx_offload:
   6901  *
   6902  *	Set up TCP/IP checksumming parameters for the
   6903  *	specified packet, for NEWQUEUE devices
   6904  */
   6905 static int
   6906 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6907     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6908 {
   6909 	struct mbuf *m0 = txs->txs_mbuf;
   6910 	struct m_tag *mtag;
   6911 	uint32_t vl_len, mssidx, cmdc;
   6912 	struct ether_header *eh;
   6913 	int offset, iphl;
   6914 
   6915 	/*
   6916 	 * XXX It would be nice if the mbuf pkthdr had offset
   6917 	 * fields for the protocol headers.
   6918 	 */
   6919 	*cmdlenp = 0;
   6920 	*fieldsp = 0;
   6921 
   6922 	eh = mtod(m0, struct ether_header *);
   6923 	switch (htons(eh->ether_type)) {
   6924 	case ETHERTYPE_IP:
   6925 	case ETHERTYPE_IPV6:
   6926 		offset = ETHER_HDR_LEN;
   6927 		break;
   6928 
   6929 	case ETHERTYPE_VLAN:
   6930 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6931 		break;
   6932 
   6933 	default:
   6934 		/* Don't support this protocol or encapsulation. */
   6935 		*do_csum = false;
   6936 		return 0;
   6937 	}
   6938 	*do_csum = true;
   6939 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6940 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6941 
   6942 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6943 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6944 
   6945 	if ((m0->m_pkthdr.csum_flags &
   6946 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6947 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6948 	} else {
   6949 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6950 	}
   6951 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6952 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6953 
   6954 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6955 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6956 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6957 		*cmdlenp |= NQTX_CMD_VLE;
   6958 	}
   6959 
   6960 	mssidx = 0;
   6961 
   6962 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6963 		int hlen = offset + iphl;
   6964 		int tcp_hlen;
   6965 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6966 
   6967 		if (__predict_false(m0->m_len <
   6968 				    (hlen + sizeof(struct tcphdr)))) {
   6969 			/*
   6970 			 * TCP/IP headers are not in the first mbuf; we need
   6971 			 * to do this the slow and painful way.  Let's just
   6972 			 * hope this doesn't happen very often.
   6973 			 */
   6974 			struct tcphdr th;
   6975 
   6976 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6977 
   6978 			m_copydata(m0, hlen, sizeof(th), &th);
   6979 			if (v4) {
   6980 				struct ip ip;
   6981 
   6982 				m_copydata(m0, offset, sizeof(ip), &ip);
   6983 				ip.ip_len = 0;
   6984 				m_copyback(m0,
   6985 				    offset + offsetof(struct ip, ip_len),
   6986 				    sizeof(ip.ip_len), &ip.ip_len);
   6987 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6988 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6989 			} else {
   6990 				struct ip6_hdr ip6;
   6991 
   6992 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6993 				ip6.ip6_plen = 0;
   6994 				m_copyback(m0,
   6995 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6996 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6997 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6998 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6999 			}
   7000 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7001 			    sizeof(th.th_sum), &th.th_sum);
   7002 
   7003 			tcp_hlen = th.th_off << 2;
   7004 		} else {
   7005 			/*
   7006 			 * TCP/IP headers are in the first mbuf; we can do
   7007 			 * this the easy way.
   7008 			 */
   7009 			struct tcphdr *th;
   7010 
   7011 			if (v4) {
   7012 				struct ip *ip =
   7013 				    (void *)(mtod(m0, char *) + offset);
   7014 				th = (void *)(mtod(m0, char *) + hlen);
   7015 
   7016 				ip->ip_len = 0;
   7017 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7018 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7019 			} else {
   7020 				struct ip6_hdr *ip6 =
   7021 				    (void *)(mtod(m0, char *) + offset);
   7022 				th = (void *)(mtod(m0, char *) + hlen);
   7023 
   7024 				ip6->ip6_plen = 0;
   7025 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7026 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7027 			}
   7028 			tcp_hlen = th->th_off << 2;
   7029 		}
   7030 		hlen += tcp_hlen;
   7031 		*cmdlenp |= NQTX_CMD_TSE;
   7032 
   7033 		if (v4) {
   7034 			WM_Q_EVCNT_INCR(txq, txtso);
   7035 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7036 		} else {
   7037 			WM_Q_EVCNT_INCR(txq, txtso6);
   7038 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7039 		}
    7040 		*fieldsp |= ((m0->m_pkthdr.len - hlen)
         		    << NQTXD_FIELDS_PAYLEN_SHIFT);
    7041 		KASSERT(((m0->m_pkthdr.len - hlen)
         			& ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7042 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7043 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7044 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7045 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7046 	} else {
   7047 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7048 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7049 	}
   7050 
   7051 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7052 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7053 		cmdc |= NQTXC_CMD_IP4;
   7054 	}
   7055 
   7056 	if (m0->m_pkthdr.csum_flags &
   7057 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7058 		WM_Q_EVCNT_INCR(txq, txtusum);
   7059 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7060 			cmdc |= NQTXC_CMD_TCP;
   7061 		} else {
   7062 			cmdc |= NQTXC_CMD_UDP;
   7063 		}
   7064 		cmdc |= NQTXC_CMD_IP4;
   7065 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7066 	}
   7067 	if (m0->m_pkthdr.csum_flags &
   7068 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7069 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7070 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7071 			cmdc |= NQTXC_CMD_TCP;
   7072 		} else {
   7073 			cmdc |= NQTXC_CMD_UDP;
   7074 		}
   7075 		cmdc |= NQTXC_CMD_IP6;
   7076 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7077 	}
   7078 
   7079 	/* Fill in the context descriptor. */
   7080 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7081 	    htole32(vl_len);
   7082 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7083 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7084 	    htole32(cmdc);
   7085 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7086 	    htole32(mssidx);
   7087 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7088 	DPRINTF(WM_DEBUG_TX,
   7089 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7090 	    txq->txq_next, 0, vl_len));
   7091 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7092 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7093 	txs->txs_ndesc++;
   7094 	return 0;
   7095 }
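
         /*
          * Worked example (illustrative): for an untagged IPv4/TCP packet
          * with a 20-byte IP header and plain TCP checksum offload, the
          * context descriptor built above carries MACLEN = 14 and
          * IPLEN = 20 in vl_len and NQTXC_CMD_IP4 | NQTXC_CMD_TCP in cmdc,
          * while the data descriptors get NQTXD_FIELDS_TUXSM (plus IXSM if
          * M_CSUM_IPv4 is requested).
          */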
   7096 
   7097 /*
   7098  * wm_nq_start:		[ifnet interface function]
   7099  *
   7100  *	Start packet transmission on the interface for NEWQUEUE devices
   7101  */
   7102 static void
   7103 wm_nq_start(struct ifnet *ifp)
   7104 {
   7105 	struct wm_softc *sc = ifp->if_softc;
   7106 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7107 
   7108 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7109 
   7110 	/*
   7111 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7112 	 */
   7113 
   7114 	mutex_enter(txq->txq_lock);
   7115 	if (!txq->txq_stopping)
   7116 		wm_nq_start_locked(ifp);
   7117 	mutex_exit(txq->txq_lock);
   7118 }
   7119 
   7120 static void
   7121 wm_nq_start_locked(struct ifnet *ifp)
   7122 {
   7123 	struct wm_softc *sc = ifp->if_softc;
   7124 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7125 
   7126 	wm_nq_send_common_locked(ifp, txq, false);
   7127 }
   7128 
   7129 static int
   7130 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7131 {
   7132 	int qid;
   7133 	struct wm_softc *sc = ifp->if_softc;
   7134 	struct wm_txqueue *txq;
   7135 
   7136 	qid = wm_select_txqueue(ifp, m);
   7137 	txq = &sc->sc_queue[qid].wmq_txq;
   7138 
   7139 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7140 		m_freem(m);
   7141 		WM_Q_EVCNT_INCR(txq, txdrop);
   7142 		return ENOBUFS;
   7143 	}
   7144 
   7145 	/*
   7146 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7147 	 */
   7148 	ifp->if_obytes += m->m_pkthdr.len;
   7149 	if (m->m_flags & M_MCAST)
   7150 		ifp->if_omcasts++;
   7151 
   7152 	/*
    7153 	 * This mutex_tryenter() can fail at run time in two situations:
    7154 	 *     (1) contention with the interrupt handler
    7155 	 *         (wm_txrxintr_msix())
    7156 	 *     (2) contention with the deferred if_start softint
    7157 	 *         (wm_handle_queue())
    7158 	 * In either case, the last packet enqueued to txq->txq_interq is
    7159 	 * dequeued by wm_deferred_start_locked(), so it does not get
    7160 	 * stuck.
   7161 	 */
   7162 	if (mutex_tryenter(txq->txq_lock)) {
   7163 		if (!txq->txq_stopping)
   7164 			wm_nq_transmit_locked(ifp, txq);
   7165 		mutex_exit(txq->txq_lock);
   7166 	}
   7167 
   7168 	return 0;
   7169 }
   7170 
   7171 static void
   7172 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7173 {
   7174 
   7175 	wm_nq_send_common_locked(ifp, txq, true);
   7176 }
   7177 
   7178 static void
   7179 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7180     bool is_transmit)
   7181 {
   7182 	struct wm_softc *sc = ifp->if_softc;
   7183 	struct mbuf *m0;
   7184 	struct m_tag *mtag;
   7185 	struct wm_txsoft *txs;
   7186 	bus_dmamap_t dmamap;
   7187 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7188 	bool do_csum, sent;
   7189 
   7190 	KASSERT(mutex_owned(txq->txq_lock));
   7191 
   7192 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7193 		return;
   7194 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7195 		return;
   7196 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7197 		return;
   7198 
   7199 	sent = false;
   7200 
   7201 	/*
   7202 	 * Loop through the send queue, setting up transmit descriptors
   7203 	 * until we drain the queue, or use up all available transmit
   7204 	 * descriptors.
   7205 	 */
   7206 	for (;;) {
   7207 		m0 = NULL;
   7208 
   7209 		/* Get a work queue entry. */
   7210 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7211 			wm_txeof(sc, txq);
   7212 			if (txq->txq_sfree == 0) {
   7213 				DPRINTF(WM_DEBUG_TX,
   7214 				    ("%s: TX: no free job descriptors\n",
   7215 					device_xname(sc->sc_dev)));
   7216 				WM_Q_EVCNT_INCR(txq, txsstall);
   7217 				break;
   7218 			}
   7219 		}
   7220 
   7221 		/* Grab a packet off the queue. */
   7222 		if (is_transmit)
   7223 			m0 = pcq_get(txq->txq_interq);
   7224 		else
   7225 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7226 		if (m0 == NULL)
   7227 			break;
   7228 
   7229 		DPRINTF(WM_DEBUG_TX,
   7230 		    ("%s: TX: have packet to transmit: %p\n",
   7231 		    device_xname(sc->sc_dev), m0));
   7232 
   7233 		txs = &txq->txq_soft[txq->txq_snext];
   7234 		dmamap = txs->txs_dmamap;
   7235 
   7236 		/*
   7237 		 * Load the DMA map.  If this fails, the packet either
   7238 		 * didn't fit in the allotted number of segments, or we
   7239 		 * were short on resources.  For the too-many-segments
   7240 		 * case, we simply report an error and drop the packet,
   7241 		 * since we can't sanely copy a jumbo packet to a single
   7242 		 * buffer.
   7243 		 */
   7244 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7245 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7246 		if (error) {
   7247 			if (error == EFBIG) {
   7248 				WM_Q_EVCNT_INCR(txq, txdrop);
   7249 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7250 				    "DMA segments, dropping...\n",
   7251 				    device_xname(sc->sc_dev));
   7252 				wm_dump_mbuf_chain(sc, m0);
   7253 				m_freem(m0);
   7254 				continue;
   7255 			}
   7256 			/* Short on resources, just stop for now. */
   7257 			DPRINTF(WM_DEBUG_TX,
   7258 			    ("%s: TX: dmamap load failed: %d\n",
   7259 			    device_xname(sc->sc_dev), error));
   7260 			break;
   7261 		}
   7262 
   7263 		segs_needed = dmamap->dm_nsegs;
   7264 
   7265 		/*
   7266 		 * Ensure we have enough descriptors free to describe
   7267 		 * the packet.  Note, we always reserve one descriptor
   7268 		 * at the end of the ring due to the semantics of the
   7269 		 * TDT register, plus one more in the event we need
   7270 		 * to load offload context.
   7271 		 */
   7272 		if (segs_needed > txq->txq_free - 2) {
   7273 			/*
   7274 			 * Not enough free descriptors to transmit this
   7275 			 * packet.  We haven't committed anything yet,
   7276 			 * so just unload the DMA map, put the packet
    7277 			 * back on the queue, and punt.  Notify the upper
   7278 			 * layer that there are no more slots left.
   7279 			 */
   7280 			DPRINTF(WM_DEBUG_TX,
   7281 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7282 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7283 			    segs_needed, txq->txq_free - 1));
   7284 			if (!is_transmit)
   7285 				ifp->if_flags |= IFF_OACTIVE;
   7286 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7287 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7288 			WM_Q_EVCNT_INCR(txq, txdstall);
   7289 			break;
   7290 		}
   7291 
   7292 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7293 
   7294 		DPRINTF(WM_DEBUG_TX,
   7295 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7296 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7297 
   7298 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7299 
   7300 		/*
   7301 		 * Store a pointer to the packet so that we can free it
   7302 		 * later.
   7303 		 *
   7304 		 * Initially, we consider the number of descriptors the
    7305 		 * packet uses to be the number of DMA segments.  This
    7306 		 * may be incremented by 1 if we do checksum offload (a
    7307 		 * descriptor is used to set the checksum context).
   7308 		 */
   7309 		txs->txs_mbuf = m0;
   7310 		txs->txs_firstdesc = txq->txq_next;
   7311 		txs->txs_ndesc = segs_needed;
   7312 
   7313 		/* Set up offload parameters for this packet. */
   7314 		uint32_t cmdlen, fields, dcmdlen;
   7315 		if (m0->m_pkthdr.csum_flags &
   7316 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7317 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7318 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7319 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7320 			    &do_csum) != 0) {
   7321 				/* Error message already displayed. */
   7322 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7323 				continue;
   7324 			}
   7325 		} else {
   7326 			do_csum = false;
   7327 			cmdlen = 0;
   7328 			fields = 0;
   7329 		}
   7330 
   7331 		/* Sync the DMA map. */
   7332 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7333 		    BUS_DMASYNC_PREWRITE);
   7334 
   7335 		/* Initialize the first transmit descriptor. */
   7336 		nexttx = txq->txq_next;
   7337 		if (!do_csum) {
    7338 			/* Set up a legacy descriptor. */
   7339 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7340 			    dmamap->dm_segs[0].ds_addr);
   7341 			txq->txq_descs[nexttx].wtx_cmdlen =
   7342 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7343 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7344 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7345 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7346 			    NULL) {
   7347 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7348 				    htole32(WTX_CMD_VLE);
   7349 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7350 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7351 			} else {
    7352 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
         				    = 0;
   7353 			}
   7354 			dcmdlen = 0;
   7355 		} else {
    7356 			/* Set up an advanced data descriptor. */
   7357 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7358 			    htole64(dmamap->dm_segs[0].ds_addr);
   7359 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7360 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7361 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7362 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7363 			    htole32(fields);
   7364 			DPRINTF(WM_DEBUG_TX,
   7365 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7366 			    device_xname(sc->sc_dev), nexttx,
   7367 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7368 			DPRINTF(WM_DEBUG_TX,
   7369 			    ("\t 0x%08x%08x\n", fields,
   7370 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7371 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7372 		}
   7373 
   7374 		lasttx = nexttx;
   7375 		nexttx = WM_NEXTTX(txq, nexttx);
   7376 		/*
    7377 		 * Fill in the next descriptors.  The legacy and
    7378 		 * advanced formats are the same here.
   7379 		 */
   7380 		for (seg = 1; seg < dmamap->dm_nsegs;
   7381 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7382 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7383 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7384 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7385 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7386 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7387 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7388 			lasttx = nexttx;
   7389 
   7390 			DPRINTF(WM_DEBUG_TX,
   7391 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7392 			     "len %#04zx\n",
   7393 			    device_xname(sc->sc_dev), nexttx,
   7394 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7395 			    dmamap->dm_segs[seg].ds_len));
   7396 		}
   7397 
   7398 		KASSERT(lasttx != -1);
   7399 
   7400 		/*
   7401 		 * Set up the command byte on the last descriptor of
   7402 		 * the packet.  If we're in the interrupt delay window,
   7403 		 * delay the interrupt.
   7404 		 */
   7405 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7406 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7407 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7408 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7409 
   7410 		txs->txs_lastdesc = lasttx;
   7411 
   7412 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7413 		    device_xname(sc->sc_dev),
   7414 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7415 
   7416 		/* Sync the descriptors we're using. */
   7417 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7418 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7419 
   7420 		/* Give the packet to the chip. */
   7421 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7422 		sent = true;
   7423 
   7424 		DPRINTF(WM_DEBUG_TX,
   7425 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7426 
   7427 		DPRINTF(WM_DEBUG_TX,
   7428 		    ("%s: TX: finished transmitting packet, job %d\n",
   7429 		    device_xname(sc->sc_dev), txq->txq_snext));
   7430 
   7431 		/* Advance the tx pointer. */
   7432 		txq->txq_free -= txs->txs_ndesc;
   7433 		txq->txq_next = nexttx;
   7434 
   7435 		txq->txq_sfree--;
   7436 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7437 
   7438 		/* Pass the packet to any BPF listeners. */
   7439 		bpf_mtap(ifp, m0);
   7440 	}
   7441 
   7442 	if (m0 != NULL) {
   7443 		if (!is_transmit)
   7444 			ifp->if_flags |= IFF_OACTIVE;
   7445 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7446 		WM_Q_EVCNT_INCR(txq, txdrop);
   7447 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7448 			__func__));
   7449 		m_freem(m0);
   7450 	}
   7451 
   7452 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7453 		/* No more slots; notify upper layer. */
   7454 		if (!is_transmit)
   7455 			ifp->if_flags |= IFF_OACTIVE;
   7456 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7457 	}
   7458 
   7459 	if (sent) {
   7460 		/* Set a watchdog timer in case the chip flakes out. */
   7461 		ifp->if_timer = 5;
   7462 	}
   7463 }
   7464 
   7465 static void
   7466 wm_deferred_start_locked(struct wm_txqueue *txq)
   7467 {
   7468 	struct wm_softc *sc = txq->txq_sc;
   7469 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7470 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7471 	int qid = wmq->wmq_id;
   7472 
   7473 	KASSERT(mutex_owned(txq->txq_lock));
   7474 
    7475 	if (txq->txq_stopping)
    7476 		return;
   7479 
   7480 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7481 		/* XXX need for ALTQ */
   7482 		if (qid == 0)
   7483 			wm_nq_start_locked(ifp);
   7484 		wm_nq_transmit_locked(ifp, txq);
   7485 	} else {
   7486 		/* XXX need for ALTQ */
   7487 		if (qid == 0)
   7488 			wm_start_locked(ifp);
   7489 		wm_transmit_locked(ifp, txq);
   7490 	}
   7491 }
   7492 
   7493 /* Interrupt */
   7494 
   7495 /*
   7496  * wm_txeof:
   7497  *
   7498  *	Helper; handle transmit interrupts.
   7499  */
   7500 static int
   7501 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7502 {
   7503 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7504 	struct wm_txsoft *txs;
   7505 	bool processed = false;
   7506 	int count = 0;
   7507 	int i;
   7508 	uint8_t status;
   7509 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7510 
   7511 	KASSERT(mutex_owned(txq->txq_lock));
   7512 
   7513 	if (txq->txq_stopping)
   7514 		return 0;
   7515 
   7516 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7517 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7518 	if (wmq->wmq_id == 0)
   7519 		ifp->if_flags &= ~IFF_OACTIVE;
   7520 
   7521 	/*
   7522 	 * Go through the Tx list and free mbufs for those
   7523 	 * frames which have been transmitted.
   7524 	 */
   7525 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7526 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7527 		txs = &txq->txq_soft[i];
   7528 
   7529 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7530 			device_xname(sc->sc_dev), i));
   7531 
   7532 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7533 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7534 
   7535 		status =
   7536 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7537 		if ((status & WTX_ST_DD) == 0) {
   7538 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7539 			    BUS_DMASYNC_PREREAD);
   7540 			break;
   7541 		}
   7542 
   7543 		processed = true;
   7544 		count++;
   7545 		DPRINTF(WM_DEBUG_TX,
   7546 		    ("%s: TX: job %d done: descs %d..%d\n",
   7547 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7548 		    txs->txs_lastdesc));
   7549 
   7550 		/*
   7551 		 * XXX We should probably be using the statistics
   7552 		 * XXX registers, but I don't know if they exist
   7553 		 * XXX on chips before the i82544.
   7554 		 */
   7555 
   7556 #ifdef WM_EVENT_COUNTERS
   7557 		if (status & WTX_ST_TU)
   7558 			WM_Q_EVCNT_INCR(txq, tu);
   7559 #endif /* WM_EVENT_COUNTERS */
   7560 
   7561 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7562 			ifp->if_oerrors++;
   7563 			if (status & WTX_ST_LC)
   7564 				log(LOG_WARNING, "%s: late collision\n",
   7565 				    device_xname(sc->sc_dev));
   7566 			else if (status & WTX_ST_EC) {
   7567 				ifp->if_collisions += 16;
   7568 				log(LOG_WARNING, "%s: excessive collisions\n",
   7569 				    device_xname(sc->sc_dev));
   7570 			}
   7571 		} else
   7572 			ifp->if_opackets++;
   7573 
   7574 		txq->txq_free += txs->txs_ndesc;
   7575 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7576 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7577 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7578 		m_freem(txs->txs_mbuf);
   7579 		txs->txs_mbuf = NULL;
   7580 	}
   7581 
   7582 	/* Update the dirty transmit buffer pointer. */
   7583 	txq->txq_sdirty = i;
   7584 	DPRINTF(WM_DEBUG_TX,
   7585 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7586 
   7587 	if (count != 0)
   7588 		rnd_add_uint32(&sc->rnd_source, count);
   7589 
   7590 	/*
   7591 	 * If there are no more pending transmissions, cancel the watchdog
   7592 	 * timer.
   7593 	 */
   7594 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7595 		ifp->if_timer = 0;
   7596 
   7597 	return processed;
   7598 }
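
         /*
          * Reclaim sketch (illustrative): wm_txeof() walks the software job
          * ring from txq_sdirty toward txq_snext and reclaims a job only
          * once descriptor write-back has set WTX_ST_DD on its last
          * descriptor; the job's txs_ndesc descriptors then return to
          * txq_free and its slot to txq_sfree.
          */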
   7599 
   7600 static inline uint32_t
   7601 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7602 {
   7603 	struct wm_softc *sc = rxq->rxq_sc;
   7604 
   7605 	if (sc->sc_type == WM_T_82574)
    7606 		return EXTRXC_STATUS(
         		    rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
    7607 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
    7608 		return NQRXC_STATUS(
         		    rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7609 	else
   7610 		return rxq->rxq_descs[idx].wrx_status;
   7611 }
   7612 
   7613 static inline uint32_t
   7614 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7615 {
   7616 	struct wm_softc *sc = rxq->rxq_sc;
   7617 
   7618 	if (sc->sc_type == WM_T_82574)
    7619 		return EXTRXC_ERROR(
         		    rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
    7620 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
    7621 		return NQRXC_ERROR(
         		    rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7622 	else
   7623 		return rxq->rxq_descs[idx].wrx_errors;
   7624 }
   7625 
   7626 static inline uint16_t
   7627 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7628 {
   7629 	struct wm_softc *sc = rxq->rxq_sc;
   7630 
   7631 	if (sc->sc_type == WM_T_82574)
   7632 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7633 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7634 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7635 	else
   7636 		return rxq->rxq_descs[idx].wrx_special;
   7637 }
   7638 
   7639 static inline int
   7640 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7641 {
   7642 	struct wm_softc *sc = rxq->rxq_sc;
   7643 
   7644 	if (sc->sc_type == WM_T_82574)
   7645 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7646 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7647 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7648 	else
   7649 		return rxq->rxq_descs[idx].wrx_len;
   7650 }
   7651 
   7652 #ifdef WM_DEBUG
   7653 static inline uint32_t
   7654 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7655 {
   7656 	struct wm_softc *sc = rxq->rxq_sc;
   7657 
   7658 	if (sc->sc_type == WM_T_82574)
   7659 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7660 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7661 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7662 	else
   7663 		return 0;
   7664 }
   7665 
   7666 static inline uint8_t
   7667 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7668 {
   7669 	struct wm_softc *sc = rxq->rxq_sc;
   7670 
   7671 	if (sc->sc_type == WM_T_82574)
    7672 		return EXTRXC_RSS_TYPE(
         		    rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
    7673 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
    7674 		return NQRXC_RSS_TYPE(
         		    rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7675 	else
   7676 		return 0;
   7677 }
   7678 #endif /* WM_DEBUG */
   7679 
   7680 static inline bool
   7681 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7682     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7683 {
   7684 
   7685 	if (sc->sc_type == WM_T_82574)
   7686 		return (status & ext_bit) != 0;
   7687 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7688 		return (status & nq_bit) != 0;
   7689 	else
   7690 		return (status & legacy_bit) != 0;
   7691 }
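
         /*
          * Usage sketch (illustrative): these predicates test one logical
          * bit across the three descriptor layouts, e.g.
          *	wm_rxdesc_is_set_status(sc, status,
          *	    WRX_ST_DD, EXTRXC_STATUS_DD, NQRXC_STATUS_DD)
          * selects the extended bit on 82574, the NQ bit on 82575 and
          * newer, and the legacy bit otherwise.
          */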
   7692 
   7693 static inline bool
   7694 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7695     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7696 {
   7697 
   7698 	if (sc->sc_type == WM_T_82574)
   7699 		return (error & ext_bit) != 0;
   7700 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7701 		return (error & nq_bit) != 0;
   7702 	else
   7703 		return (error & legacy_bit) != 0;
   7704 }
   7705 
   7706 static inline bool
   7707 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7708 {
   7709 
   7710 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7711 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7712 		return true;
   7713 	else
   7714 		return false;
   7715 }
   7716 
   7717 static inline bool
   7718 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7719 {
   7720 	struct wm_softc *sc = rxq->rxq_sc;
   7721 
   7722 	/* XXXX missing error bit for newqueue? */
   7723 	if (wm_rxdesc_is_set_error(sc, errors,
   7724 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
    7725 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ
         		|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
    7726 		NQRXC_ERROR_RXE)) {
    7727 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
         			EXTRXC_ERROR_SE, 0))
    7728 			log(LOG_WARNING, "%s: symbol error\n",
    7729 			    device_xname(sc->sc_dev));
    7730 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
         			EXTRXC_ERROR_SEQ, 0))
    7731 			log(LOG_WARNING, "%s: receive sequence error\n",
    7732 			    device_xname(sc->sc_dev));
    7733 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
         			EXTRXC_ERROR_CE, 0))
   7734 			log(LOG_WARNING, "%s: CRC error\n",
   7735 			    device_xname(sc->sc_dev));
   7736 		return true;
   7737 	}
   7738 
   7739 	return false;
   7740 }
   7741 
   7742 static inline bool
   7743 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7744 {
   7745 	struct wm_softc *sc = rxq->rxq_sc;
   7746 
   7747 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7748 		NQRXC_STATUS_DD)) {
   7749 		/* We have processed all of the receive descriptors. */
   7750 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7751 		return false;
   7752 	}
   7753 
   7754 	return true;
   7755 }
   7756 
   7757 static inline bool
    7758 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
    7759     uint16_t vlantag, struct mbuf *m)
   7760 {
   7761 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7762 
   7763 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7764 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7765 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7766 	}
   7767 
   7768 	return true;
   7769 }
   7770 
   7771 static inline void
   7772 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7773     uint32_t errors, struct mbuf *m)
   7774 {
   7775 	struct wm_softc *sc = rxq->rxq_sc;
   7776 
   7777 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7778 		if (wm_rxdesc_is_set_status(sc, status,
   7779 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7780 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7781 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7782 			if (wm_rxdesc_is_set_error(sc, errors,
   7783 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7784 				m->m_pkthdr.csum_flags |=
   7785 					M_CSUM_IPv4_BAD;
   7786 		}
   7787 		if (wm_rxdesc_is_set_status(sc, status,
   7788 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7789 			/*
   7790 			 * Note: we don't know if this was TCP or UDP,
   7791 			 * so we just set both bits, and expect the
   7792 			 * upper layers to deal.
   7793 			 */
   7794 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7795 			m->m_pkthdr.csum_flags |=
   7796 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7797 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7798 			if (wm_rxdesc_is_set_error(sc, errors,
   7799 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7800 				m->m_pkthdr.csum_flags |=
   7801 					M_CSUM_TCP_UDP_BAD;
   7802 		}
   7803 	}
   7804 }
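
         /*
          * Note (illustrative): the hardware reports only that an L4
          * checksum was checked, without distinguishing TCP from UDP, so
          * the code above sets both M_CSUM_TCPv[46] and M_CSUM_UDPv[46]
          * and leaves it to the upper layers to use the flag matching the
          * packet's protocol.
          */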
   7805 
   7806 /*
   7807  * wm_rxeof:
   7808  *
   7809  *	Helper; handle receive interrupts.
   7810  */
   7811 static void
   7812 wm_rxeof(struct wm_rxqueue *rxq)
   7813 {
   7814 	struct wm_softc *sc = rxq->rxq_sc;
   7815 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7816 	struct wm_rxsoft *rxs;
   7817 	struct mbuf *m;
   7818 	int i, len;
   7819 	int count = 0;
   7820 	uint32_t status, errors;
   7821 	uint16_t vlantag;
   7822 
   7823 	KASSERT(mutex_owned(rxq->rxq_lock));
   7824 
   7825 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7826 		rxs = &rxq->rxq_soft[i];
   7827 
   7828 		DPRINTF(WM_DEBUG_RX,
   7829 		    ("%s: RX: checking descriptor %d\n",
   7830 		    device_xname(sc->sc_dev), i));
    7831 		wm_cdrxsync(rxq, i,
         		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7832 
   7833 		status = wm_rxdesc_get_status(rxq, i);
   7834 		errors = wm_rxdesc_get_errors(rxq, i);
   7835 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   7836 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   7837 #ifdef WM_DEBUG
   7838 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   7839 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   7840 #endif
   7841 
   7842 		if (!wm_rxdesc_dd(rxq, i, status)) {
   7843 			/*
    7844 			 * Update the receive pointer while holding
    7845 			 * rxq_lock, consistent with the incremented counter.
   7846 			 */
   7847 			rxq->rxq_ptr = i;
   7848 			break;
   7849 		}
   7850 
   7851 		count++;
   7852 		if (__predict_false(rxq->rxq_discard)) {
   7853 			DPRINTF(WM_DEBUG_RX,
   7854 			    ("%s: RX: discarding contents of descriptor %d\n",
   7855 			    device_xname(sc->sc_dev), i));
   7856 			wm_init_rxdesc(rxq, i);
   7857 			if (wm_rxdesc_is_eop(rxq, status)) {
   7858 				/* Reset our state. */
   7859 				DPRINTF(WM_DEBUG_RX,
   7860 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7861 				    device_xname(sc->sc_dev)));
   7862 				rxq->rxq_discard = 0;
   7863 			}
   7864 			continue;
   7865 		}
   7866 
   7867 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7868 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7869 
   7870 		m = rxs->rxs_mbuf;
   7871 
   7872 		/*
   7873 		 * Add a new receive buffer to the ring, unless of
   7874 		 * course the length is zero. Treat the latter as a
   7875 		 * failed mapping.
   7876 		 */
   7877 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7878 			/*
   7879 			 * Failed, throw away what we've done so
   7880 			 * far, and discard the rest of the packet.
   7881 			 */
   7882 			ifp->if_ierrors++;
   7883 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7884 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7885 			wm_init_rxdesc(rxq, i);
   7886 			if (!wm_rxdesc_is_eop(rxq, status))
   7887 				rxq->rxq_discard = 1;
   7888 			if (rxq->rxq_head != NULL)
   7889 				m_freem(rxq->rxq_head);
   7890 			WM_RXCHAIN_RESET(rxq);
   7891 			DPRINTF(WM_DEBUG_RX,
   7892 			    ("%s: RX: Rx buffer allocation failed, "
   7893 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7894 			    rxq->rxq_discard ? " (discard)" : ""));
   7895 			continue;
   7896 		}
   7897 
   7898 		m->m_len = len;
   7899 		rxq->rxq_len += len;
   7900 		DPRINTF(WM_DEBUG_RX,
   7901 		    ("%s: RX: buffer at %p len %d\n",
   7902 		    device_xname(sc->sc_dev), m->m_data, len));
   7903 
   7904 		/* If this is not the end of the packet, keep looking. */
   7905 		if (!wm_rxdesc_is_eop(rxq, status)) {
   7906 			WM_RXCHAIN_LINK(rxq, m);
   7907 			DPRINTF(WM_DEBUG_RX,
   7908 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7909 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7910 			continue;
   7911 		}
   7912 
   7913 		/*
    7914 		 * Okay, we have the entire packet now.  The chip is
    7915 		 * configured to include the FCS except on I350, I354 and
    7916 		 * I21[01] (not all chips can be configured to strip it),
    7917 		 * so we need to trim it.  We may also need to shorten the
    7918 		 * previous mbuf in the chain when the current mbuf is too
    7919 		 * short to hold the whole FCS by itself.
    7920 		 * Due to an erratum, RCTL_SECRC is always set on I350, so
    7921 		 * the hardware strips the FCS there and we must not trim.
    7922 		 */
   7923 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7924 		    && (sc->sc_type != WM_T_I210)
   7925 		    && (sc->sc_type != WM_T_I211)) {
   7926 			if (m->m_len < ETHER_CRC_LEN) {
   7927 				rxq->rxq_tail->m_len
   7928 				    -= (ETHER_CRC_LEN - m->m_len);
   7929 				m->m_len = 0;
   7930 			} else
   7931 				m->m_len -= ETHER_CRC_LEN;
   7932 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7933 		} else
   7934 			len = rxq->rxq_len;
   7935 
   7936 		WM_RXCHAIN_LINK(rxq, m);
   7937 
   7938 		*rxq->rxq_tailp = NULL;
   7939 		m = rxq->rxq_head;
   7940 
   7941 		WM_RXCHAIN_RESET(rxq);
   7942 
   7943 		DPRINTF(WM_DEBUG_RX,
   7944 		    ("%s: RX: have entire packet, len -> %d\n",
   7945 		    device_xname(sc->sc_dev), len));
   7946 
   7947 		/* If an error occurred, update stats and drop the packet. */
   7948 		if (wm_rxdesc_has_errors(rxq, errors)) {
   7949 			m_freem(m);
   7950 			continue;
   7951 		}
   7952 
   7953 		/* No errors.  Receive the packet. */
   7954 		m_set_rcvif(m, ifp);
   7955 		m->m_pkthdr.len = len;
   7956 		/*
   7957 		 * TODO
    7958 		 * The rsshash and rsstype should be saved to this mbuf.
   7959 		 */
   7960 		DPRINTF(WM_DEBUG_RX,
   7961 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   7962 			device_xname(sc->sc_dev), rsstype, rsshash));
   7963 
   7964 		/*
   7965 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7966 		 * for us.  Associate the tag with the packet.
   7967 		 */
   7968 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   7969 			continue;
   7970 
   7971 		/* Set up checksum info for this packet. */
   7972 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   7973 		/*
    7974 		 * Update the receive pointer while holding rxq_lock,
    7975 		 * consistent with the processed descriptor count.
   7976 		 */
   7977 		rxq->rxq_ptr = i;
   7978 		mutex_exit(rxq->rxq_lock);
   7979 
   7980 		/* Pass it on. */
   7981 		if_percpuq_enqueue(sc->sc_ipq, m);
   7982 
   7983 		mutex_enter(rxq->rxq_lock);
   7984 
   7985 		if (rxq->rxq_stopping)
   7986 			break;
   7987 	}
   7988 
   7989 	if (count != 0)
   7990 		rnd_add_uint32(&sc->rnd_source, count);
   7991 
   7992 	DPRINTF(WM_DEBUG_RX,
   7993 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7994 }
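
/*
 * Usage sketch (hypothetical caller, mirroring wm_intr_legacy() and
 * wm_txrxintr_msix() below): wm_rxeof() must be called with rxq_lock
 * held, per the KASSERT at its top.
 */
#if 0
	mutex_enter(rxq->rxq_lock);
	wm_rxeof(rxq);
	mutex_exit(rxq->rxq_lock);
#endif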
   7995 
   7996 /*
   7997  * wm_linkintr_gmii:
   7998  *
   7999  *	Helper; handle link interrupts for GMII.
   8000  */
   8001 static void
   8002 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8003 {
   8004 
   8005 	KASSERT(WM_CORE_LOCKED(sc));
   8006 
   8007 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8008 		__func__));
   8009 
   8010 	if (icr & ICR_LSC) {
   8011 		uint32_t reg;
   8012 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8013 
   8014 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8015 			wm_gig_downshift_workaround_ich8lan(sc);
   8016 
   8017 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8018 			device_xname(sc->sc_dev)));
   8019 		mii_pollstat(&sc->sc_mii);
   8020 		if (sc->sc_type == WM_T_82543) {
   8021 			int miistatus, active;
   8022 
   8023 			/*
   8024 			 * With 82543, we need to force speed and
   8025 			 * duplex on the MAC equal to what the PHY
   8026 			 * speed and duplex configuration is.
   8027 			 */
   8028 			miistatus = sc->sc_mii.mii_media_status;
   8029 
   8030 			if (miistatus & IFM_ACTIVE) {
   8031 				active = sc->sc_mii.mii_media_active;
   8032 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8033 				switch (IFM_SUBTYPE(active)) {
   8034 				case IFM_10_T:
   8035 					sc->sc_ctrl |= CTRL_SPEED_10;
   8036 					break;
   8037 				case IFM_100_TX:
   8038 					sc->sc_ctrl |= CTRL_SPEED_100;
   8039 					break;
   8040 				case IFM_1000_T:
   8041 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8042 					break;
   8043 				default:
   8044 					/*
    8045 					 * Fiber?
    8046 					 * Should not enter here.
   8047 					 */
   8048 					printf("unknown media (%x)\n", active);
   8049 					break;
   8050 				}
   8051 				if (active & IFM_FDX)
   8052 					sc->sc_ctrl |= CTRL_FD;
   8053 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8054 			}
   8055 		} else if ((sc->sc_type == WM_T_ICH8)
   8056 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8057 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8058 		} else if (sc->sc_type == WM_T_PCH) {
   8059 			wm_k1_gig_workaround_hv(sc,
   8060 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8061 		}
   8062 
   8063 		if ((sc->sc_phytype == WMPHY_82578)
   8064 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8065 			== IFM_1000_T)) {
   8066 
   8067 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8068 				delay(200*1000); /* XXX too big */
   8069 
   8070 				/* Link stall fix for link up */
   8071 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8072 				    HV_MUX_DATA_CTRL,
   8073 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8074 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8075 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8076 				    HV_MUX_DATA_CTRL,
   8077 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8078 			}
   8079 		}
   8080 		/*
   8081 		 * I217 Packet Loss issue:
   8082 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8083 		 * on power up.
   8084 		 * Set the Beacon Duration for I217 to 8 usec
   8085 		 */
   8086 		if ((sc->sc_type == WM_T_PCH_LPT)
   8087 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8088 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8089 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8090 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8091 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8092 		}
   8093 
   8094 		/* XXX Work-around I218 hang issue */
   8095 		/* e1000_k1_workaround_lpt_lp() */
   8096 
   8097 		if ((sc->sc_type == WM_T_PCH_LPT)
   8098 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8099 			/*
   8100 			 * Set platform power management values for Latency
   8101 			 * Tolerance Reporting (LTR)
   8102 			 */
   8103 			wm_platform_pm_pch_lpt(sc,
   8104 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8105 				    != 0));
   8106 		}
   8107 
   8108 		/* FEXTNVM6 K1-off workaround */
   8109 		if (sc->sc_type == WM_T_PCH_SPT) {
   8110 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8111 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8112 			    & FEXTNVM6_K1_OFF_ENABLE)
   8113 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8114 			else
   8115 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8116 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8117 		}
   8118 	} else if (icr & ICR_RXSEQ) {
    8119 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8120 			device_xname(sc->sc_dev)));
   8121 	}
   8122 }
   8123 
   8124 /*
   8125  * wm_linkintr_tbi:
   8126  *
   8127  *	Helper; handle link interrupts for TBI mode.
   8128  */
   8129 static void
   8130 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8131 {
   8132 	uint32_t status;
   8133 
   8134 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8135 		__func__));
   8136 
   8137 	status = CSR_READ(sc, WMREG_STATUS);
   8138 	if (icr & ICR_LSC) {
   8139 		if (status & STATUS_LU) {
   8140 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8141 			    device_xname(sc->sc_dev),
   8142 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8143 			/*
   8144 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8145 			 * so we should update sc->sc_ctrl
   8146 			 */
   8147 
   8148 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8149 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8150 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8151 			if (status & STATUS_FD)
   8152 				sc->sc_tctl |=
   8153 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8154 			else
   8155 				sc->sc_tctl |=
   8156 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8157 			if (sc->sc_ctrl & CTRL_TFCE)
   8158 				sc->sc_fcrtl |= FCRTL_XONE;
   8159 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8160 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8161 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8162 				      sc->sc_fcrtl);
   8163 			sc->sc_tbi_linkup = 1;
   8164 		} else {
   8165 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8166 			    device_xname(sc->sc_dev)));
   8167 			sc->sc_tbi_linkup = 0;
   8168 		}
   8169 		/* Update LED */
   8170 		wm_tbi_serdes_set_linkled(sc);
   8171 	} else if (icr & ICR_RXSEQ) {
   8172 		DPRINTF(WM_DEBUG_LINK,
   8173 		    ("%s: LINK: Receive sequence error\n",
   8174 		    device_xname(sc->sc_dev)));
   8175 	}
   8176 }
   8177 
   8178 /*
   8179  * wm_linkintr_serdes:
   8180  *
    8181  *	Helper; handle link interrupts for SERDES mode.
   8182  */
   8183 static void
   8184 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8185 {
   8186 	struct mii_data *mii = &sc->sc_mii;
   8187 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8188 	uint32_t pcs_adv, pcs_lpab, reg;
   8189 
   8190 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8191 		__func__));
   8192 
   8193 	if (icr & ICR_LSC) {
   8194 		/* Check PCS */
   8195 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8196 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8197 			mii->mii_media_status |= IFM_ACTIVE;
   8198 			sc->sc_tbi_linkup = 1;
   8199 		} else {
   8200 			mii->mii_media_status |= IFM_NONE;
   8201 			sc->sc_tbi_linkup = 0;
   8202 			wm_tbi_serdes_set_linkled(sc);
   8203 			return;
   8204 		}
   8205 		mii->mii_media_active |= IFM_1000_SX;
   8206 		if ((reg & PCS_LSTS_FDX) != 0)
   8207 			mii->mii_media_active |= IFM_FDX;
   8208 		else
   8209 			mii->mii_media_active |= IFM_HDX;
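		/*
		 * The block below resolves flow control from the advertised
		 * and link partner pause bits.  Summary (assuming the usual
		 * IEEE 802.3 clause 28B resolution):
		 *
		 *	adv SYM	adv ASYM  lp SYM  lp ASYM  result
		 *	   1	    x	     1	     x	   TX+RX pause
		 *	   0	    1	     1	     1	   TX pause only
		 *	   1	    1	     0	     1	   RX pause only
		 */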
   8210 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8211 			/* Check flow */
   8212 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8213 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8214 				DPRINTF(WM_DEBUG_LINK,
   8215 				    ("XXX LINKOK but not ACOMP\n"));
   8216 				return;
   8217 			}
   8218 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8219 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8220 			DPRINTF(WM_DEBUG_LINK,
   8221 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8222 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8223 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8224 				mii->mii_media_active |= IFM_FLOW
   8225 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8226 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8227 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8228 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8229 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8230 				mii->mii_media_active |= IFM_FLOW
   8231 				    | IFM_ETH_TXPAUSE;
   8232 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8233 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8234 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8235 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8236 				mii->mii_media_active |= IFM_FLOW
   8237 				    | IFM_ETH_RXPAUSE;
   8238 		}
   8239 		/* Update LED */
   8240 		wm_tbi_serdes_set_linkled(sc);
   8241 	} else {
   8242 		DPRINTF(WM_DEBUG_LINK,
   8243 		    ("%s: LINK: Receive sequence error\n",
   8244 		    device_xname(sc->sc_dev)));
   8245 	}
   8246 }
   8247 
   8248 /*
   8249  * wm_linkintr:
   8250  *
   8251  *	Helper; handle link interrupts.
   8252  */
   8253 static void
   8254 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8255 {
   8256 
   8257 	KASSERT(WM_CORE_LOCKED(sc));
   8258 
   8259 	if (sc->sc_flags & WM_F_HAS_MII)
   8260 		wm_linkintr_gmii(sc, icr);
   8261 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8262 	    && (sc->sc_type >= WM_T_82575))
   8263 		wm_linkintr_serdes(sc, icr);
   8264 	else
   8265 		wm_linkintr_tbi(sc, icr);
   8266 }
   8267 
   8268 /*
   8269  * wm_intr_legacy:
   8270  *
   8271  *	Interrupt service routine for INTx and MSI.
   8272  */
   8273 static int
   8274 wm_intr_legacy(void *arg)
   8275 {
   8276 	struct wm_softc *sc = arg;
   8277 	struct wm_queue *wmq = &sc->sc_queue[0];
   8278 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8279 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8280 	uint32_t icr, rndval = 0;
   8281 	int handled = 0;
   8282 
   8283 	DPRINTF(WM_DEBUG_TX,
   8284 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
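	/*
	 * Reading ICR acknowledges (clears) the asserted interrupt causes,
	 * so the loop below re-reads it until no cause of interest remains.
	 */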
   8285 	while (1 /* CONSTCOND */) {
   8286 		icr = CSR_READ(sc, WMREG_ICR);
   8287 		if ((icr & sc->sc_icr) == 0)
   8288 			break;
   8289 		if (rndval == 0)
   8290 			rndval = icr;
   8291 
   8292 		mutex_enter(rxq->rxq_lock);
   8293 
   8294 		if (rxq->rxq_stopping) {
   8295 			mutex_exit(rxq->rxq_lock);
   8296 			break;
   8297 		}
   8298 
   8299 		handled = 1;
   8300 
   8301 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8302 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8303 			DPRINTF(WM_DEBUG_RX,
   8304 			    ("%s: RX: got Rx intr 0x%08x\n",
   8305 			    device_xname(sc->sc_dev),
   8306 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8307 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8308 		}
   8309 #endif
   8310 		wm_rxeof(rxq);
   8311 
   8312 		mutex_exit(rxq->rxq_lock);
   8313 		mutex_enter(txq->txq_lock);
   8314 
   8315 		if (txq->txq_stopping) {
   8316 			mutex_exit(txq->txq_lock);
   8317 			break;
   8318 		}
   8319 
   8320 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8321 		if (icr & ICR_TXDW) {
   8322 			DPRINTF(WM_DEBUG_TX,
   8323 			    ("%s: TX: got TXDW interrupt\n",
   8324 			    device_xname(sc->sc_dev)));
   8325 			WM_Q_EVCNT_INCR(txq, txdw);
   8326 		}
   8327 #endif
   8328 		wm_txeof(sc, txq);
   8329 
   8330 		mutex_exit(txq->txq_lock);
   8331 		WM_CORE_LOCK(sc);
   8332 
   8333 		if (sc->sc_core_stopping) {
   8334 			WM_CORE_UNLOCK(sc);
   8335 			break;
   8336 		}
   8337 
   8338 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8339 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8340 			wm_linkintr(sc, icr);
   8341 		}
   8342 
   8343 		WM_CORE_UNLOCK(sc);
   8344 
   8345 		if (icr & ICR_RXO) {
   8346 #if defined(WM_DEBUG)
   8347 			log(LOG_WARNING, "%s: Receive overrun\n",
   8348 			    device_xname(sc->sc_dev));
   8349 #endif /* defined(WM_DEBUG) */
   8350 		}
   8351 	}
   8352 
   8353 	rnd_add_uint32(&sc->rnd_source, rndval);
   8354 
   8355 	if (handled) {
   8356 		/* Try to get more packets going. */
   8357 		softint_schedule(wmq->wmq_si);
   8358 	}
   8359 
   8360 	return handled;
   8361 }
   8362 
   8363 static inline void
   8364 wm_txrxintr_disable(struct wm_queue *wmq)
   8365 {
   8366 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8367 
   8368 	if (sc->sc_type == WM_T_82574)
   8369 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8370 	else if (sc->sc_type == WM_T_82575)
   8371 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8372 	else
   8373 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8374 }
   8375 
   8376 static inline void
   8377 wm_txrxintr_enable(struct wm_queue *wmq)
   8378 {
   8379 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8380 
   8381 	if (sc->sc_type == WM_T_82574)
   8382 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8383 	else if (sc->sc_type == WM_T_82575)
   8384 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8385 	else
   8386 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8387 }
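
/*
 * wm_txrxintr_msix() below brackets its Tx/Rx processing with the two
 * helpers above: the queue's interrupt source is masked on entry and
 * unmasked only after the deferred work has been scheduled, so the
 * handler is not re-entered for the same queue in the meantime.
 */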
   8388 
   8389 static int
   8390 wm_txrxintr_msix(void *arg)
   8391 {
   8392 	struct wm_queue *wmq = arg;
   8393 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8394 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8395 	struct wm_softc *sc = txq->txq_sc;
   8396 
   8397 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8398 
   8399 	DPRINTF(WM_DEBUG_TX,
   8400 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8401 
   8402 	wm_txrxintr_disable(wmq);
   8403 
   8404 	mutex_enter(txq->txq_lock);
   8405 
   8406 	if (txq->txq_stopping) {
   8407 		mutex_exit(txq->txq_lock);
   8408 		return 0;
   8409 	}
   8410 
   8411 	WM_Q_EVCNT_INCR(txq, txdw);
   8412 	wm_txeof(sc, txq);
   8413 	/* wm_deferred start() is done in wm_handle_queue(). */
   8414 	mutex_exit(txq->txq_lock);
   8415 
   8416 	DPRINTF(WM_DEBUG_RX,
   8417 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8418 	mutex_enter(rxq->rxq_lock);
   8419 
   8420 	if (rxq->rxq_stopping) {
   8421 		mutex_exit(rxq->rxq_lock);
   8422 		return 0;
   8423 	}
   8424 
   8425 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8426 	wm_rxeof(rxq);
   8427 	mutex_exit(rxq->rxq_lock);
   8428 
   8429 	softint_schedule(wmq->wmq_si);
   8430 
   8431 	wm_txrxintr_enable(wmq);
   8432 
   8433 	return 1;
   8434 }
   8435 
   8436 static void
   8437 wm_handle_queue(void *arg)
   8438 {
   8439 	struct wm_queue *wmq = arg;
   8440 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8441 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8442 	struct wm_softc *sc = txq->txq_sc;
   8443 
   8444 	mutex_enter(txq->txq_lock);
   8445 	if (txq->txq_stopping) {
   8446 		mutex_exit(txq->txq_lock);
   8447 		return;
   8448 	}
   8449 	wm_txeof(sc, txq);
   8450 	wm_deferred_start_locked(txq);
   8451 	mutex_exit(txq->txq_lock);
   8452 
   8453 	mutex_enter(rxq->rxq_lock);
   8454 	if (rxq->rxq_stopping) {
   8455 		mutex_exit(rxq->rxq_lock);
   8456 		return;
   8457 	}
   8458 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8459 	wm_rxeof(rxq);
   8460 	mutex_exit(rxq->rxq_lock);
   8461 }
   8462 
   8463 /*
   8464  * wm_linkintr_msix:
   8465  *
   8466  *	Interrupt service routine for link status change for MSI-X.
   8467  */
   8468 static int
   8469 wm_linkintr_msix(void *arg)
   8470 {
   8471 	struct wm_softc *sc = arg;
   8472 	uint32_t reg;
   8473 
   8474 	DPRINTF(WM_DEBUG_LINK,
   8475 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8476 
   8477 	reg = CSR_READ(sc, WMREG_ICR);
   8478 	WM_CORE_LOCK(sc);
   8479 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8480 		goto out;
   8481 
   8482 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8483 	wm_linkintr(sc, ICR_LSC);
   8484 
   8485 out:
   8486 	WM_CORE_UNLOCK(sc);
   8487 
   8488 	if (sc->sc_type == WM_T_82574)
   8489 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8490 	else if (sc->sc_type == WM_T_82575)
   8491 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8492 	else
   8493 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8494 
   8495 	return 1;
   8496 }
   8497 
   8498 /*
   8499  * Media related.
   8500  * GMII, SGMII, TBI (and SERDES)
   8501  */
   8502 
   8503 /* Common */
   8504 
   8505 /*
   8506  * wm_tbi_serdes_set_linkled:
   8507  *
   8508  *	Update the link LED on TBI and SERDES devices.
   8509  */
   8510 static void
   8511 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8512 {
   8513 
   8514 	if (sc->sc_tbi_linkup)
   8515 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8516 	else
   8517 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8518 
   8519 	/* 82540 or newer devices are active low */
   8520 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8521 
   8522 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8523 }
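
/*
 * Worked example: on an 82540 or newer part with sc_tbi_linkup set,
 * CTRL_SWDPIN(0) is first set and then inverted by the XOR above, so
 * the pin is driven low, lighting the active-low link LED.
 */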
   8524 
   8525 /* GMII related */
   8526 
   8527 /*
   8528  * wm_gmii_reset:
   8529  *
   8530  *	Reset the PHY.
   8531  */
   8532 static void
   8533 wm_gmii_reset(struct wm_softc *sc)
   8534 {
   8535 	uint32_t reg;
   8536 	int rv;
   8537 
   8538 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8539 		device_xname(sc->sc_dev), __func__));
   8540 
   8541 	rv = sc->phy.acquire(sc);
   8542 	if (rv != 0) {
   8543 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8544 		    __func__);
   8545 		return;
   8546 	}
   8547 
   8548 	switch (sc->sc_type) {
   8549 	case WM_T_82542_2_0:
   8550 	case WM_T_82542_2_1:
   8551 		/* null */
   8552 		break;
   8553 	case WM_T_82543:
   8554 		/*
   8555 		 * With 82543, we need to force speed and duplex on the MAC
   8556 		 * equal to what the PHY speed and duplex configuration is.
   8557 		 * In addition, we need to perform a hardware reset on the PHY
   8558 		 * to take it out of reset.
   8559 		 */
   8560 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8561 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8562 
   8563 		/* The PHY reset pin is active-low. */
   8564 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8565 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8566 		    CTRL_EXT_SWDPIN(4));
   8567 		reg |= CTRL_EXT_SWDPIO(4);
   8568 
   8569 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8570 		CSR_WRITE_FLUSH(sc);
   8571 		delay(10*1000);
   8572 
   8573 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8574 		CSR_WRITE_FLUSH(sc);
   8575 		delay(150);
   8576 #if 0
   8577 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8578 #endif
   8579 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8580 		break;
   8581 	case WM_T_82544:	/* reset 10000us */
   8582 	case WM_T_82540:
   8583 	case WM_T_82545:
   8584 	case WM_T_82545_3:
   8585 	case WM_T_82546:
   8586 	case WM_T_82546_3:
   8587 	case WM_T_82541:
   8588 	case WM_T_82541_2:
   8589 	case WM_T_82547:
   8590 	case WM_T_82547_2:
   8591 	case WM_T_82571:	/* reset 100us */
   8592 	case WM_T_82572:
   8593 	case WM_T_82573:
   8594 	case WM_T_82574:
   8595 	case WM_T_82575:
   8596 	case WM_T_82576:
   8597 	case WM_T_82580:
   8598 	case WM_T_I350:
   8599 	case WM_T_I354:
   8600 	case WM_T_I210:
   8601 	case WM_T_I211:
   8602 	case WM_T_82583:
   8603 	case WM_T_80003:
   8604 		/* generic reset */
   8605 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8606 		CSR_WRITE_FLUSH(sc);
   8607 		delay(20000);
   8608 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8609 		CSR_WRITE_FLUSH(sc);
   8610 		delay(20000);
   8611 
   8612 		if ((sc->sc_type == WM_T_82541)
   8613 		    || (sc->sc_type == WM_T_82541_2)
   8614 		    || (sc->sc_type == WM_T_82547)
   8615 		    || (sc->sc_type == WM_T_82547_2)) {
    8616 			/* Workarounds for IGP are done in igp_reset() */
   8617 			/* XXX add code to set LED after phy reset */
   8618 		}
   8619 		break;
   8620 	case WM_T_ICH8:
   8621 	case WM_T_ICH9:
   8622 	case WM_T_ICH10:
   8623 	case WM_T_PCH:
   8624 	case WM_T_PCH2:
   8625 	case WM_T_PCH_LPT:
   8626 	case WM_T_PCH_SPT:
   8627 		/* generic reset */
   8628 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8629 		CSR_WRITE_FLUSH(sc);
   8630 		delay(100);
   8631 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8632 		CSR_WRITE_FLUSH(sc);
   8633 		delay(150);
   8634 		break;
   8635 	default:
   8636 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8637 		    __func__);
   8638 		break;
   8639 	}
   8640 
   8641 	sc->phy.release(sc);
   8642 
   8643 	/* get_cfg_done */
   8644 	wm_get_cfg_done(sc);
   8645 
   8646 	/* extra setup */
   8647 	switch (sc->sc_type) {
   8648 	case WM_T_82542_2_0:
   8649 	case WM_T_82542_2_1:
   8650 	case WM_T_82543:
   8651 	case WM_T_82544:
   8652 	case WM_T_82540:
   8653 	case WM_T_82545:
   8654 	case WM_T_82545_3:
   8655 	case WM_T_82546:
   8656 	case WM_T_82546_3:
   8657 	case WM_T_82541_2:
   8658 	case WM_T_82547_2:
   8659 	case WM_T_82571:
   8660 	case WM_T_82572:
   8661 	case WM_T_82573:
   8662 	case WM_T_82575:
   8663 	case WM_T_82576:
   8664 	case WM_T_82580:
   8665 	case WM_T_I350:
   8666 	case WM_T_I354:
   8667 	case WM_T_I210:
   8668 	case WM_T_I211:
   8669 	case WM_T_80003:
   8670 		/* null */
   8671 		break;
   8672 	case WM_T_82574:
   8673 	case WM_T_82583:
   8674 		wm_lplu_d0_disable(sc);
   8675 		break;
   8676 	case WM_T_82541:
   8677 	case WM_T_82547:
   8678 		/* XXX Configure actively LED after PHY reset */
   8679 		break;
   8680 	case WM_T_ICH8:
   8681 	case WM_T_ICH9:
   8682 	case WM_T_ICH10:
   8683 	case WM_T_PCH:
   8684 	case WM_T_PCH2:
   8685 	case WM_T_PCH_LPT:
   8686 	case WM_T_PCH_SPT:
    8687 		/* Allow time for h/w to get to a quiescent state after reset */
   8688 		delay(10*1000);
   8689 
   8690 		if (sc->sc_type == WM_T_PCH)
   8691 			wm_hv_phy_workaround_ich8lan(sc);
   8692 
   8693 		if (sc->sc_type == WM_T_PCH2)
   8694 			wm_lv_phy_workaround_ich8lan(sc);
   8695 
   8696 		/* Clear the host wakeup bit after lcd reset */
   8697 		if (sc->sc_type >= WM_T_PCH) {
   8698 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8699 			    BM_PORT_GEN_CFG);
   8700 			reg &= ~BM_WUC_HOST_WU_BIT;
   8701 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8702 			    BM_PORT_GEN_CFG, reg);
   8703 		}
   8704 
   8705 		/*
    8706 		 * XXX Configure the LCD with the extended configuration region
   8707 		 * in NVM
   8708 		 */
   8709 
   8710 		/* Disable D0 LPLU. */
   8711 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8712 			wm_lplu_d0_disable_pch(sc);
   8713 		else
   8714 			wm_lplu_d0_disable(sc);	/* ICH* */
   8715 		break;
   8716 	default:
   8717 		panic("%s: unknown type\n", __func__);
   8718 		break;
   8719 	}
   8720 }
   8721 
   8722 /*
    8723  * Set up sc_phytype and mii_{read|write}reg.
    8724  *
    8725  *  To identify the PHY type, the correct read/write functions must be
    8726  * selected, and to select them the PCI ID or MAC type is required,
    8727  * without accessing PHY registers.
    8728  *
    8729  *  On the first call of this function, the PHY ID is not known yet, so
    8730  * the PCI ID or MAC type is checked. The list of PCI IDs may not be
    8731  * complete, so the result might be incorrect.
    8732  *
    8733  *  On the second call, the PHY OUI and model are used to identify the
    8734  * PHY type. This might still not be perfect because some entries are
    8735  * missing from the comparison, but it is better than the first call.
    8736  *
    8737  *  If the newly detected result differs from the previous assumption,
    8738  * a diagnostic message is printed.
   8739  */
   8740 static void
   8741 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8742     uint16_t phy_model)
   8743 {
   8744 	device_t dev = sc->sc_dev;
   8745 	struct mii_data *mii = &sc->sc_mii;
   8746 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8747 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8748 	mii_readreg_t new_readreg;
   8749 	mii_writereg_t new_writereg;
   8750 
   8751 	if (mii->mii_readreg == NULL) {
   8752 		/*
   8753 		 *  This is the first call of this function. For ICH and PCH
   8754 		 * variants, it's difficult to determine the PHY access method
   8755 		 * by sc_type, so use the PCI product ID for some devices.
   8756 		 */
   8757 
   8758 		switch (sc->sc_pcidevid) {
   8759 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8760 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8761 			/* 82577 */
   8762 			new_phytype = WMPHY_82577;
   8763 			break;
   8764 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8765 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8766 			/* 82578 */
   8767 			new_phytype = WMPHY_82578;
   8768 			break;
   8769 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8770 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8771 			/* 82579 */
   8772 			new_phytype = WMPHY_82579;
   8773 			break;
   8774 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8775 		case PCI_PRODUCT_INTEL_82801I_BM:
   8776 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8777 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8778 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8779 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8780 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8781 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8782 			/* ICH8, 9, 10 with 82567 */
   8783 			new_phytype = WMPHY_BM;
   8784 			break;
   8785 		default:
   8786 			break;
   8787 		}
   8788 	} else {
   8789 		/* It's not the first call. Use PHY OUI and model */
   8790 		switch (phy_oui) {
   8791 		case MII_OUI_ATHEROS: /* XXX ??? */
   8792 			switch (phy_model) {
   8793 			case 0x0004: /* XXX */
   8794 				new_phytype = WMPHY_82578;
   8795 				break;
   8796 			default:
   8797 				break;
   8798 			}
   8799 			break;
   8800 		case MII_OUI_xxMARVELL:
   8801 			switch (phy_model) {
   8802 			case MII_MODEL_xxMARVELL_I210:
   8803 				new_phytype = WMPHY_I210;
   8804 				break;
   8805 			case MII_MODEL_xxMARVELL_E1011:
   8806 			case MII_MODEL_xxMARVELL_E1000_3:
   8807 			case MII_MODEL_xxMARVELL_E1000_5:
   8808 			case MII_MODEL_xxMARVELL_E1112:
   8809 				new_phytype = WMPHY_M88;
   8810 				break;
   8811 			case MII_MODEL_xxMARVELL_E1149:
   8812 				new_phytype = WMPHY_BM;
   8813 				break;
   8814 			case MII_MODEL_xxMARVELL_E1111:
   8815 			case MII_MODEL_xxMARVELL_I347:
   8816 			case MII_MODEL_xxMARVELL_E1512:
   8817 			case MII_MODEL_xxMARVELL_E1340M:
   8818 			case MII_MODEL_xxMARVELL_E1543:
   8819 				new_phytype = WMPHY_M88;
   8820 				break;
   8821 			case MII_MODEL_xxMARVELL_I82563:
   8822 				new_phytype = WMPHY_GG82563;
   8823 				break;
   8824 			default:
   8825 				break;
   8826 			}
   8827 			break;
   8828 		case MII_OUI_INTEL:
   8829 			switch (phy_model) {
   8830 			case MII_MODEL_INTEL_I82577:
   8831 				new_phytype = WMPHY_82577;
   8832 				break;
   8833 			case MII_MODEL_INTEL_I82579:
   8834 				new_phytype = WMPHY_82579;
   8835 				break;
   8836 			case MII_MODEL_INTEL_I217:
   8837 				new_phytype = WMPHY_I217;
   8838 				break;
   8839 			case MII_MODEL_INTEL_I82580:
   8840 			case MII_MODEL_INTEL_I350:
   8841 				new_phytype = WMPHY_82580;
   8842 				break;
   8843 			default:
   8844 				break;
   8845 			}
   8846 			break;
   8847 		case MII_OUI_yyINTEL:
   8848 			switch (phy_model) {
   8849 			case MII_MODEL_yyINTEL_I82562G:
   8850 			case MII_MODEL_yyINTEL_I82562EM:
   8851 			case MII_MODEL_yyINTEL_I82562ET:
   8852 				new_phytype = WMPHY_IFE;
   8853 				break;
   8854 			case MII_MODEL_yyINTEL_IGP01E1000:
   8855 				new_phytype = WMPHY_IGP;
   8856 				break;
   8857 			case MII_MODEL_yyINTEL_I82566:
   8858 				new_phytype = WMPHY_IGP_3;
   8859 				break;
   8860 			default:
   8861 				break;
   8862 			}
   8863 			break;
   8864 		default:
   8865 			break;
   8866 		}
   8867 		if (new_phytype == WMPHY_UNKNOWN)
   8868 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   8869 			    __func__);
   8870 
   8871 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    8872 		    && (sc->sc_phytype != new_phytype)) {
    8873 			aprint_error_dev(dev, "Previously assumed PHY type "
    8874 			    "(%u) was incorrect. PHY type from PHY ID = %u\n",
   8875 			    sc->sc_phytype, new_phytype);
   8876 		}
   8877 	}
   8878 
   8879 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   8880 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   8881 		/* SGMII */
   8882 		new_readreg = wm_sgmii_readreg;
   8883 		new_writereg = wm_sgmii_writereg;
   8884 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   8885 		/* BM2 (phyaddr == 1) */
   8886 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   8887 		    && (new_phytype != WMPHY_BM)
   8888 		    && (new_phytype != WMPHY_UNKNOWN))
   8889 			doubt_phytype = new_phytype;
   8890 		new_phytype = WMPHY_BM;
   8891 		new_readreg = wm_gmii_bm_readreg;
   8892 		new_writereg = wm_gmii_bm_writereg;
   8893 	} else if (sc->sc_type >= WM_T_PCH) {
   8894 		/* All PCH* use _hv_ */
   8895 		new_readreg = wm_gmii_hv_readreg;
   8896 		new_writereg = wm_gmii_hv_writereg;
   8897 	} else if (sc->sc_type >= WM_T_ICH8) {
   8898 		/* non-82567 ICH8, 9 and 10 */
   8899 		new_readreg = wm_gmii_i82544_readreg;
   8900 		new_writereg = wm_gmii_i82544_writereg;
   8901 	} else if (sc->sc_type >= WM_T_80003) {
   8902 		/* 80003 */
   8903 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   8904 		    && (new_phytype != WMPHY_GG82563)
   8905 		    && (new_phytype != WMPHY_UNKNOWN))
   8906 			doubt_phytype = new_phytype;
   8907 		new_phytype = WMPHY_GG82563;
   8908 		new_readreg = wm_gmii_i80003_readreg;
   8909 		new_writereg = wm_gmii_i80003_writereg;
   8910 	} else if (sc->sc_type >= WM_T_I210) {
   8911 		/* I210 and I211 */
   8912 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   8913 		    && (new_phytype != WMPHY_I210)
   8914 		    && (new_phytype != WMPHY_UNKNOWN))
   8915 			doubt_phytype = new_phytype;
   8916 		new_phytype = WMPHY_I210;
   8917 		new_readreg = wm_gmii_gs40g_readreg;
   8918 		new_writereg = wm_gmii_gs40g_writereg;
   8919 	} else if (sc->sc_type >= WM_T_82580) {
   8920 		/* 82580, I350 and I354 */
   8921 		new_readreg = wm_gmii_82580_readreg;
   8922 		new_writereg = wm_gmii_82580_writereg;
   8923 	} else if (sc->sc_type >= WM_T_82544) {
    8924 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8925 		new_readreg = wm_gmii_i82544_readreg;
   8926 		new_writereg = wm_gmii_i82544_writereg;
   8927 	} else {
   8928 		new_readreg = wm_gmii_i82543_readreg;
   8929 		new_writereg = wm_gmii_i82543_writereg;
   8930 	}
   8931 
   8932 	if (new_phytype == WMPHY_BM) {
   8933 		/* All BM use _bm_ */
   8934 		new_readreg = wm_gmii_bm_readreg;
   8935 		new_writereg = wm_gmii_bm_writereg;
   8936 	}
   8937 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8938 		/* All PCH* use _hv_ */
   8939 		new_readreg = wm_gmii_hv_readreg;
   8940 		new_writereg = wm_gmii_hv_writereg;
   8941 	}
   8942 
   8943 	/* Diag output */
   8944 	if (doubt_phytype != WMPHY_UNKNOWN)
   8945 		aprint_error_dev(dev, "Assumed new PHY type was "
   8946 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   8947 		    new_phytype);
   8948 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    8949 	    && (sc->sc_phytype != new_phytype))
    8950 		aprint_error_dev(dev, "Previously assumed PHY type "
    8951 		    "(%u) was incorrect. New PHY type = %u\n",
   8952 		    sc->sc_phytype, new_phytype);
   8953 
   8954 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   8955 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   8956 
   8957 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   8958 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   8959 		    "function was incorrect.\n");
   8960 
   8961 	/* Update now */
   8962 	sc->sc_phytype = new_phytype;
   8963 	mii->mii_readreg = new_readreg;
   8964 	mii->mii_writereg = new_writereg;
   8965 }
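
/*
 * Sketch of the two-call protocol described above (hypothetical caller,
 * mirroring what wm_gmii_mediainit() below actually does):
 */
#if 0
	/* 1st call: guess from the PCI ID or MAC type; may be wrong. */
	wm_gmii_setup_phytype(sc, 0, 0);
	/* ... mii_attach() probes the PHY and learns its OUI/model ... */
	/* 2nd call: refine the guess using the real PHY ID. */
	wm_gmii_setup_phytype(sc, child->mii_mpd_oui, child->mii_mpd_model);
#endif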
   8966 
   8967 /*
   8968  * wm_get_phy_id_82575:
   8969  *
   8970  * Return PHY ID. Return -1 if it failed.
   8971  */
   8972 static int
   8973 wm_get_phy_id_82575(struct wm_softc *sc)
   8974 {
   8975 	uint32_t reg;
   8976 	int phyid = -1;
   8977 
   8978 	/* XXX */
   8979 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8980 		return -1;
   8981 
   8982 	if (wm_sgmii_uses_mdio(sc)) {
   8983 		switch (sc->sc_type) {
   8984 		case WM_T_82575:
   8985 		case WM_T_82576:
   8986 			reg = CSR_READ(sc, WMREG_MDIC);
   8987 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8988 			break;
   8989 		case WM_T_82580:
   8990 		case WM_T_I350:
   8991 		case WM_T_I354:
   8992 		case WM_T_I210:
   8993 		case WM_T_I211:
   8994 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8995 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8996 			break;
   8997 		default:
   8998 			return -1;
   8999 		}
   9000 	}
   9001 
   9002 	return phyid;
   9003 }
   9004 
   9005 
   9006 /*
   9007  * wm_gmii_mediainit:
   9008  *
   9009  *	Initialize media for use on 1000BASE-T devices.
   9010  */
   9011 static void
   9012 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9013 {
   9014 	device_t dev = sc->sc_dev;
   9015 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9016 	struct mii_data *mii = &sc->sc_mii;
   9017 	uint32_t reg;
   9018 
   9019 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9020 		device_xname(sc->sc_dev), __func__));
   9021 
   9022 	/* We have GMII. */
   9023 	sc->sc_flags |= WM_F_HAS_MII;
   9024 
   9025 	if (sc->sc_type == WM_T_80003)
    9026 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9027 	else
   9028 		sc->sc_tipg = TIPG_1000T_DFLT;
   9029 
   9030 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9031 	if ((sc->sc_type == WM_T_82580)
   9032 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9033 	    || (sc->sc_type == WM_T_I211)) {
   9034 		reg = CSR_READ(sc, WMREG_PHPM);
   9035 		reg &= ~PHPM_GO_LINK_D;
   9036 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9037 	}
   9038 
   9039 	/*
   9040 	 * Let the chip set speed/duplex on its own based on
   9041 	 * signals from the PHY.
   9042 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9043 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9044 	 */
   9045 	sc->sc_ctrl |= CTRL_SLU;
   9046 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9047 
   9048 	/* Initialize our media structures and probe the GMII. */
   9049 	mii->mii_ifp = ifp;
   9050 
   9051 	/*
    9052 	 * The first call of wm_gmii_setup_phytype. The result might be
   9053 	 * incorrect.
   9054 	 */
   9055 	wm_gmii_setup_phytype(sc, 0, 0);
   9056 
   9057 	mii->mii_statchg = wm_gmii_statchg;
   9058 
   9059 	/* get PHY control from SMBus to PCIe */
   9060 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9061 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9062 		wm_smbustopci(sc);
   9063 
   9064 	wm_gmii_reset(sc);
   9065 
   9066 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9067 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9068 	    wm_gmii_mediastatus);
   9069 
   9070 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9071 	    || (sc->sc_type == WM_T_82580)
   9072 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9073 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9074 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9075 			/* Attach only one port */
   9076 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9077 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9078 		} else {
   9079 			int i, id;
   9080 			uint32_t ctrl_ext;
   9081 
   9082 			id = wm_get_phy_id_82575(sc);
   9083 			if (id != -1) {
   9084 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9085 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9086 			}
   9087 			if ((id == -1)
   9088 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9089 				/* Power on sgmii phy if it is disabled */
   9090 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9091 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    9092 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   9093 				CSR_WRITE_FLUSH(sc);
   9094 				delay(300*1000); /* XXX too long */
   9095 
    9096 				/* Try PHY addresses 1 to 7 */
   9097 				for (i = 1; i < 8; i++)
   9098 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9099 					    0xffffffff, i, MII_OFFSET_ANY,
   9100 					    MIIF_DOPAUSE);
   9101 
    9102 				/* Restore previous SFP cage power state */
   9103 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9104 			}
   9105 		}
   9106 	} else {
   9107 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9108 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9109 	}
   9110 
   9111 	/*
   9112 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9113 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9114 	 */
   9115 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9116 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9117 		wm_set_mdio_slow_mode_hv(sc);
   9118 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9119 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9120 	}
   9121 
   9122 	/*
   9123 	 * (For ICH8 variants)
   9124 	 * If PHY detection failed, use BM's r/w function and retry.
   9125 	 */
   9126 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9127 		/* if failed, retry with *_bm_* */
   9128 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9129 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9130 		    sc->sc_phytype);
   9131 		sc->sc_phytype = WMPHY_BM;
   9132 		mii->mii_readreg = wm_gmii_bm_readreg;
   9133 		mii->mii_writereg = wm_gmii_bm_writereg;
   9134 
   9135 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9136 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9137 	}
   9138 
   9139 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9140 		/* No PHY was found */
   9141 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9142 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9143 		sc->sc_phytype = WMPHY_NONE;
   9144 	} else {
   9145 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9146 
   9147 		/*
    9148 		 * PHY found! Check the PHY type again with the second call
    9149 		 * of wm_gmii_setup_phytype.
   9150 		 */
   9151 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9152 		    child->mii_mpd_model);
   9153 
   9154 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9155 	}
   9156 }
   9157 
   9158 /*
   9159  * wm_gmii_mediachange:	[ifmedia interface function]
   9160  *
   9161  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9162  */
   9163 static int
   9164 wm_gmii_mediachange(struct ifnet *ifp)
   9165 {
   9166 	struct wm_softc *sc = ifp->if_softc;
   9167 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9168 	int rc;
   9169 
   9170 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9171 		device_xname(sc->sc_dev), __func__));
   9172 	if ((ifp->if_flags & IFF_UP) == 0)
   9173 		return 0;
   9174 
   9175 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9176 	sc->sc_ctrl |= CTRL_SLU;
   9177 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9178 	    || (sc->sc_type > WM_T_82543)) {
   9179 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9180 	} else {
   9181 		sc->sc_ctrl &= ~CTRL_ASDE;
   9182 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9183 		if (ife->ifm_media & IFM_FDX)
   9184 			sc->sc_ctrl |= CTRL_FD;
   9185 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9186 		case IFM_10_T:
   9187 			sc->sc_ctrl |= CTRL_SPEED_10;
   9188 			break;
   9189 		case IFM_100_TX:
   9190 			sc->sc_ctrl |= CTRL_SPEED_100;
   9191 			break;
   9192 		case IFM_1000_T:
   9193 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9194 			break;
   9195 		default:
   9196 			panic("wm_gmii_mediachange: bad media 0x%x",
   9197 			    ife->ifm_media);
   9198 		}
   9199 	}
   9200 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9201 	if (sc->sc_type <= WM_T_82543)
   9202 		wm_gmii_reset(sc);
   9203 
   9204 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9205 		return 0;
   9206 	return rc;
   9207 }
   9208 
   9209 /*
   9210  * wm_gmii_mediastatus:	[ifmedia interface function]
   9211  *
   9212  *	Get the current interface media status on a 1000BASE-T device.
   9213  */
   9214 static void
   9215 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9216 {
   9217 	struct wm_softc *sc = ifp->if_softc;
   9218 
   9219 	ether_mediastatus(ifp, ifmr);
   9220 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9221 	    | sc->sc_flowflags;
   9222 }
   9223 
   9224 #define	MDI_IO		CTRL_SWDPIN(2)
   9225 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9226 #define	MDI_CLK		CTRL_SWDPIN(3)
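
/*
 * The i82543 has no usable MDIC register; MDIO is bit-banged through the
 * software-definable pins above: MDI_IO carries the data bit, MDI_CLK is
 * pulsed around each bit and MDI_DIR selects the pin direction.  One
 * output bit is clocked out roughly like this (a sketch of the loop body
 * of wm_i82543_mii_sendbits() below, 10us delays omitted):
 *
 *	v = (data & bit) ? (v | MDI_IO) : (v & ~MDI_IO);
 *	CSR_WRITE(sc, WMREG_CTRL, v);			setup data
 *	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);		rising edge
 *	CSR_WRITE(sc, WMREG_CTRL, v);			falling edge
 */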
   9227 
   9228 static void
   9229 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9230 {
   9231 	uint32_t i, v;
   9232 
   9233 	v = CSR_READ(sc, WMREG_CTRL);
   9234 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9235 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9236 
   9237 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9238 		if (data & i)
   9239 			v |= MDI_IO;
   9240 		else
   9241 			v &= ~MDI_IO;
   9242 		CSR_WRITE(sc, WMREG_CTRL, v);
   9243 		CSR_WRITE_FLUSH(sc);
   9244 		delay(10);
   9245 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9246 		CSR_WRITE_FLUSH(sc);
   9247 		delay(10);
   9248 		CSR_WRITE(sc, WMREG_CTRL, v);
   9249 		CSR_WRITE_FLUSH(sc);
   9250 		delay(10);
   9251 	}
   9252 }
   9253 
   9254 static uint32_t
   9255 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9256 {
   9257 	uint32_t v, i, data = 0;
   9258 
   9259 	v = CSR_READ(sc, WMREG_CTRL);
   9260 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9261 	v |= CTRL_SWDPIO(3);
   9262 
   9263 	CSR_WRITE(sc, WMREG_CTRL, v);
   9264 	CSR_WRITE_FLUSH(sc);
   9265 	delay(10);
   9266 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9267 	CSR_WRITE_FLUSH(sc);
   9268 	delay(10);
   9269 	CSR_WRITE(sc, WMREG_CTRL, v);
   9270 	CSR_WRITE_FLUSH(sc);
   9271 	delay(10);
   9272 
   9273 	for (i = 0; i < 16; i++) {
   9274 		data <<= 1;
   9275 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9276 		CSR_WRITE_FLUSH(sc);
   9277 		delay(10);
   9278 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9279 			data |= 1;
   9280 		CSR_WRITE(sc, WMREG_CTRL, v);
   9281 		CSR_WRITE_FLUSH(sc);
   9282 		delay(10);
   9283 	}
   9284 
   9285 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9286 	CSR_WRITE_FLUSH(sc);
   9287 	delay(10);
   9288 	CSR_WRITE(sc, WMREG_CTRL, v);
   9289 	CSR_WRITE_FLUSH(sc);
   9290 	delay(10);
   9291 
   9292 	return data;
   9293 }
   9294 
   9295 #undef MDI_IO
   9296 #undef MDI_DIR
   9297 #undef MDI_CLK
   9298 
   9299 /*
   9300  * wm_gmii_i82543_readreg:	[mii interface function]
   9301  *
   9302  *	Read a PHY register on the GMII (i82543 version).
   9303  */
   9304 static int
   9305 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9306 {
   9307 	struct wm_softc *sc = device_private(self);
   9308 	int rv;
   9309 
   9310 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9311 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9312 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9313 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9314 
   9315 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9316 	    device_xname(sc->sc_dev), phy, reg, rv));
   9317 
   9318 	return rv;
   9319 }
   9320 
   9321 /*
   9322  * wm_gmii_i82543_writereg:	[mii interface function]
   9323  *
   9324  *	Write a PHY register on the GMII (i82543 version).
   9325  */
   9326 static void
   9327 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9328 {
   9329 	struct wm_softc *sc = device_private(self);
   9330 
   9331 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9332 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9333 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9334 	    (MII_COMMAND_START << 30), 32);
   9335 }
   9336 
   9337 /*
   9338  * wm_gmii_mdic_readreg:	[mii interface function]
   9339  *
   9340  *	Read a PHY register on the GMII.
   9341  */
   9342 static int
   9343 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9344 {
   9345 	struct wm_softc *sc = device_private(self);
   9346 	uint32_t mdic = 0;
   9347 	int i, rv;
   9348 
   9349 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9350 	    MDIC_REGADD(reg));
   9351 
   9352 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9353 		mdic = CSR_READ(sc, WMREG_MDIC);
   9354 		if (mdic & MDIC_READY)
   9355 			break;
   9356 		delay(50);
   9357 	}
   9358 
   9359 	if ((mdic & MDIC_READY) == 0) {
   9360 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9361 		    device_xname(sc->sc_dev), phy, reg);
   9362 		rv = 0;
   9363 	} else if (mdic & MDIC_E) {
   9364 #if 0 /* This is normal if no PHY is present. */
   9365 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9366 		    device_xname(sc->sc_dev), phy, reg);
   9367 #endif
   9368 		rv = 0;
   9369 	} else {
   9370 		rv = MDIC_DATA(mdic);
   9371 		if (rv == 0xffff)
   9372 			rv = 0;
   9373 	}
   9374 
   9375 	return rv;
   9376 }
   9377 
   9378 /*
   9379  * wm_gmii_mdic_writereg:	[mii interface function]
   9380  *
   9381  *	Write a PHY register on the GMII.
   9382  */
   9383 static void
   9384 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9385 {
   9386 	struct wm_softc *sc = device_private(self);
   9387 	uint32_t mdic = 0;
   9388 	int i;
   9389 
   9390 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9391 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9392 
   9393 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9394 		mdic = CSR_READ(sc, WMREG_MDIC);
   9395 		if (mdic & MDIC_READY)
   9396 			break;
   9397 		delay(50);
   9398 	}
   9399 
   9400 	if ((mdic & MDIC_READY) == 0)
   9401 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9402 		    device_xname(sc->sc_dev), phy, reg);
   9403 	else if (mdic & MDIC_E)
   9404 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9405 		    device_xname(sc->sc_dev), phy, reg);
   9406 }
   9407 
   9408 /*
   9409  * wm_gmii_i82544_readreg:	[mii interface function]
   9410  *
   9411  *	Read a PHY register on the GMII.
   9412  */
   9413 static int
   9414 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9415 {
   9416 	struct wm_softc *sc = device_private(self);
   9417 	int rv;
   9418 
   9419 	if (sc->phy.acquire(sc)) {
   9420 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9421 		    __func__);
   9422 		return 0;
   9423 	}
   9424 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9425 	sc->phy.release(sc);
   9426 
   9427 	return rv;
   9428 }
   9429 
   9430 /*
   9431  * wm_gmii_i82544_writereg:	[mii interface function]
   9432  *
   9433  *	Write a PHY register on the GMII.
   9434  */
   9435 static void
   9436 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9437 {
   9438 	struct wm_softc *sc = device_private(self);
   9439 
   9440 	if (sc->phy.acquire(sc)) {
   9441 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9442 		    __func__);
		return;
    9443 	}
   9444 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9445 	sc->phy.release(sc);
   9446 }
   9447 
   9448 /*
   9449  * wm_gmii_i80003_readreg:	[mii interface function]
   9450  *
    9451  *	Read a PHY register on the Kumeran bus.
    9452  * This could be handled by the PHY layer if we didn't have to lock the
    9453  * resource ...
   9454  */
   9455 static int
   9456 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9457 {
   9458 	struct wm_softc *sc = device_private(self);
   9459 	int rv;
   9460 
   9461 	if (phy != 1) /* only one PHY on kumeran bus */
   9462 		return 0;
   9463 
   9464 	if (sc->phy.acquire(sc)) {
   9465 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9466 		    __func__);
   9467 		return 0;
   9468 	}
   9469 
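	/*
	 * The GG82563 PHY is paged: the page is taken from the upper bits
	 * of 'reg' and written through one of two page-select registers,
	 * depending on whether the target register lies below
	 * GG82563_MIN_ALT_REG.
	 */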
   9470 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9471 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9472 		    reg >> GG82563_PAGE_SHIFT);
   9473 	} else {
   9474 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9475 		    reg >> GG82563_PAGE_SHIFT);
   9476 	}
    9477 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9478 	delay(200);
   9479 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9480 	delay(200);
   9481 	sc->phy.release(sc);
   9482 
   9483 	return rv;
   9484 }
   9485 
   9486 /*
   9487  * wm_gmii_i80003_writereg:	[mii interface function]
   9488  *
    9489  *	Write a PHY register on the Kumeran bus.
    9490  * This could be handled by the PHY layer if we didn't have to lock the
    9491  * resource ...
   9492  */
   9493 static void
   9494 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9495 {
   9496 	struct wm_softc *sc = device_private(self);
   9497 
   9498 	if (phy != 1) /* only one PHY on kumeran bus */
   9499 		return;
   9500 
   9501 	if (sc->phy.acquire(sc)) {
   9502 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9503 		    __func__);
   9504 		return;
   9505 	}
   9506 
   9507 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9508 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9509 		    reg >> GG82563_PAGE_SHIFT);
   9510 	} else {
   9511 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9512 		    reg >> GG82563_PAGE_SHIFT);
   9513 	}
    9514 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9515 	delay(200);
   9516 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9517 	delay(200);
   9518 
   9519 	sc->phy.release(sc);
   9520 }
   9521 
   9522 /*
   9523  * wm_gmii_bm_readreg:	[mii interface function]
   9524  *
    9525  *	Read a PHY register on the BM PHYs (e.g. 82567).
    9526  * This could be handled by the PHY layer if we didn't have to lock the
    9527  * resource ...
   9528  */
   9529 static int
   9530 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9531 {
   9532 	struct wm_softc *sc = device_private(self);
   9533 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9534 	uint16_t val;
   9535 	int rv;
   9536 
   9537 	if (sc->phy.acquire(sc)) {
   9538 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9539 		    __func__);
   9540 		return 0;
   9541 	}
   9542 
   9543 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9544 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9545 		    || (reg == 31)) ? 1 : phy;
   9546 	/* Page 800 works differently than the rest so it has its own func */
   9547 	if (page == BM_WUC_PAGE) {
   9548 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9549 		rv = val;
   9550 		goto release;
   9551 	}
   9552 
   9553 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9554 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9555 		    && (sc->sc_type != WM_T_82583))
   9556 			wm_gmii_mdic_writereg(self, phy,
   9557 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9558 		else
   9559 			wm_gmii_mdic_writereg(self, phy,
   9560 			    BME1000_PHY_PAGE_SELECT, page);
   9561 	}
   9562 
   9563 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9564 
   9565 release:
   9566 	sc->phy.release(sc);
   9567 	return rv;
   9568 }
   9569 
   9570 /*
   9571  * wm_gmii_bm_writereg:	[mii interface function]
   9572  *
    9573  *	Write a PHY register on the BM PHYs (e.g. 82567).
    9574  * This could be handled by the PHY layer if we didn't have to lock the
    9575  * resource ...
   9576  */
   9577 static void
   9578 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9579 {
   9580 	struct wm_softc *sc = device_private(self);
   9581 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9582 
   9583 	if (sc->phy.acquire(sc)) {
   9584 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9585 		    __func__);
   9586 		return;
   9587 	}
   9588 
   9589 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9590 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9591 		    || (reg == 31)) ? 1 : phy;
   9592 	/* Page 800 works differently than the rest so it has its own func */
   9593 	if (page == BM_WUC_PAGE) {
   9594 		uint16_t tmp;
   9595 
   9596 		tmp = val;
   9597 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9598 		goto release;
   9599 	}
   9600 
   9601 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9602 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9603 		    && (sc->sc_type != WM_T_82583))
   9604 			wm_gmii_mdic_writereg(self, phy,
   9605 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9606 		else
   9607 			wm_gmii_mdic_writereg(self, phy,
   9608 			    BME1000_PHY_PAGE_SELECT, page);
   9609 	}
   9610 
   9611 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9612 
   9613 release:
   9614 	sc->phy.release(sc);
   9615 }
   9616 
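/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register on page 800
 *	(BM_WUC_PAGE).
 */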
   9617 static void
wm_access_phy_wakeup_reg_bm(device_t self, int offset, uint16_t *val, int rd)
   9619 {
   9620 	struct wm_softc *sc = device_private(self);
   9621 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9622 	uint16_t wuce, reg;
   9623 
   9624 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9625 		device_xname(sc->sc_dev), __func__));
   9626 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9627 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
   9629 	}
   9630 
   9631 	/*
   9632 	 * 1) Enable PHY wakeup register first.
   9633 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9634 	 */
   9635 
   9636 	/* Set page 769 */
   9637 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9638 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9639 
   9640 	/* Read WUCE and save it */
   9641 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9642 
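	/* Enable wakeup register access and clear the ME/host wakeup bits */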
   9643 	reg = wuce | BM_WUC_ENABLE_BIT;
   9644 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9645 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9646 
   9647 	/* Select page 800 */
   9648 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9649 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9650 
   9651 	/*
   9652 	 * 2) Access PHY wakeup register.
   9653 	 * See e1000_access_phy_wakeup_reg_bm.
   9654 	 */
   9655 
	/* Write the page 800 register offset to the address opcode register */
   9657 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9658 
   9659 	if (rd)
   9660 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9661 	else
   9662 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9663 
   9664 	/*
   9665 	 * 3) Disable PHY wakeup register.
   9666 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9667 	 */
   9668 	/* Set page 769 */
   9669 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9670 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9671 
   9672 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9673 }
   9674 
   9675 /*
   9676  * wm_gmii_hv_readreg:	[mii interface function]
   9677  *
 *	Read a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9681  */
   9682 static int
   9683 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9684 {
   9685 	struct wm_softc *sc = device_private(self);
   9686 	int rv;
   9687 
   9688 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9689 		device_xname(sc->sc_dev), __func__));
   9690 	if (sc->phy.acquire(sc)) {
   9691 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9692 		    __func__);
   9693 		return 0;
   9694 	}
   9695 
   9696 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9697 	sc->phy.release(sc);
   9698 	return rv;
   9699 }
   9700 
   9701 static int
   9702 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9703 {
   9704 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9705 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9706 	uint16_t val;
   9707 	int rv;
   9708 
   9709 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9710 
   9711 	/* Page 800 works differently than the rest so it has its own func */
   9712 	if (page == BM_WUC_PAGE) {
   9713 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9714 		return val;
   9715 	}
   9716 
   9717 	/*
	 * Pages 1 through 767 work differently than the rest and would
	 * need their own function, which is not implemented yet.
   9720 	 */
   9721 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9722 		printf("gmii_hv_readreg!!!\n");
   9723 		return 0;
   9724 	}
   9725 
   9726 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9727 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9728 		    page << BME1000_PAGE_SHIFT);
   9729 	}
   9730 
   9731 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9732 	return rv;
   9733 }
   9734 
   9735 /*
   9736  * wm_gmii_hv_writereg:	[mii interface function]
   9737  *
 *	Write a PHY register on the HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9741  */
   9742 static void
   9743 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9744 {
   9745 	struct wm_softc *sc = device_private(self);
   9746 
   9747 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9748 		device_xname(sc->sc_dev), __func__));
   9749 
   9750 	if (sc->phy.acquire(sc)) {
   9751 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9752 		    __func__);
   9753 		return;
   9754 	}
   9755 
   9756 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9757 	sc->phy.release(sc);
   9758 }
   9759 
   9760 static void
   9761 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9762 {
   9763 	struct wm_softc *sc = device_private(self);
   9764 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9765 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9766 
   9767 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9768 
   9769 	/* Page 800 works differently than the rest so it has its own func */
   9770 	if (page == BM_WUC_PAGE) {
   9771 		uint16_t tmp;
   9772 
   9773 		tmp = val;
   9774 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9775 		return;
   9776 	}
   9777 
   9778 	/*
	 * Pages 1 through 767 work differently than the rest and would
	 * need their own function, which is not implemented yet.
   9781 	 */
   9782 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9783 		printf("gmii_hv_writereg!!!\n");
   9784 		return;
   9785 	}
   9786 
   9787 	{
   9788 		/*
   9789 		 * XXX Workaround MDIO accesses being disabled after entering
   9790 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9791 		 * register is set)
   9792 		 */
   9793 		if (sc->sc_phytype == WMPHY_82578) {
   9794 			struct mii_softc *child;
   9795 
   9796 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9797 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9798 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9799 			    && ((val & (1 << 11)) != 0)) {
   9800 				printf("XXX need workaround\n");
   9801 			}
   9802 		}
   9803 
   9804 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9805 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9806 			    page << BME1000_PAGE_SHIFT);
   9807 		}
   9808 	}
   9809 
   9810 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9811 }
   9812 
   9813 /*
   9814  * wm_gmii_82580_readreg:	[mii interface function]
   9815  *
   9816  *	Read a PHY register on the 82580 and I350.
   9817  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9819  */
   9820 static int
   9821 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9822 {
   9823 	struct wm_softc *sc = device_private(self);
   9824 	int rv;
   9825 
   9826 	if (sc->phy.acquire(sc) != 0) {
   9827 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9828 		    __func__);
   9829 		return 0;
   9830 	}
   9831 
   9832 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9833 
   9834 	sc->phy.release(sc);
   9835 	return rv;
   9836 }
   9837 
   9838 /*
   9839  * wm_gmii_82580_writereg:	[mii interface function]
   9840  *
   9841  *	Write a PHY register on the 82580 and I350.
   9842  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9844  */
   9845 static void
   9846 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9847 {
   9848 	struct wm_softc *sc = device_private(self);
   9849 
   9850 	if (sc->phy.acquire(sc) != 0) {
   9851 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9852 		    __func__);
   9853 		return;
   9854 	}
   9855 
   9856 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9857 
   9858 	sc->phy.release(sc);
   9859 }
   9860 
   9861 /*
   9862  * wm_gmii_gs40g_readreg:	[mii interface function]
   9863  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9867  */
   9868 static int
   9869 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9870 {
   9871 	struct wm_softc *sc = device_private(self);
   9872 	int page, offset;
   9873 	int rv;
   9874 
   9875 	/* Acquire semaphore */
   9876 	if (sc->phy.acquire(sc)) {
   9877 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9878 		    __func__);
   9879 		return 0;
   9880 	}
   9881 
   9882 	/* Page select */
   9883 	page = reg >> GS40G_PAGE_SHIFT;
   9884 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9885 
   9886 	/* Read reg */
   9887 	offset = reg & GS40G_OFFSET_MASK;
   9888 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9889 
   9890 	sc->phy.release(sc);
   9891 	return rv;
   9892 }
   9893 
   9894 /*
   9895  * wm_gmii_gs40g_writereg:	[mii interface function]
   9896  *
   9897  *	Write a PHY register on the I210 and I211.
   9898  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9900  */
   9901 static void
   9902 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9903 {
   9904 	struct wm_softc *sc = device_private(self);
   9905 	int page, offset;
   9906 
   9907 	/* Acquire semaphore */
   9908 	if (sc->phy.acquire(sc)) {
   9909 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9910 		    __func__);
   9911 		return;
   9912 	}
   9913 
   9914 	/* Page select */
   9915 	page = reg >> GS40G_PAGE_SHIFT;
   9916 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9917 
   9918 	/* Write reg */
   9919 	offset = reg & GS40G_OFFSET_MASK;
   9920 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9921 
   9922 	/* Release semaphore */
   9923 	sc->phy.release(sc);
   9924 }
   9925 
   9926 /*
   9927  * wm_gmii_statchg:	[mii interface function]
   9928  *
   9929  *	Callback from MII layer when media changes.
   9930  */
   9931 static void
   9932 wm_gmii_statchg(struct ifnet *ifp)
   9933 {
   9934 	struct wm_softc *sc = ifp->if_softc;
   9935 	struct mii_data *mii = &sc->sc_mii;
   9936 
   9937 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9938 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9939 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9940 
   9941 	/*
   9942 	 * Get flow control negotiation result.
   9943 	 */
   9944 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9945 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9946 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9947 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9948 	}
   9949 
   9950 	if (sc->sc_flowflags & IFM_FLOW) {
   9951 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9952 			sc->sc_ctrl |= CTRL_TFCE;
   9953 			sc->sc_fcrtl |= FCRTL_XONE;
   9954 		}
   9955 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9956 			sc->sc_ctrl |= CTRL_RFCE;
   9957 	}
   9958 
   9959 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9960 		DPRINTF(WM_DEBUG_LINK,
   9961 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9962 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9963 	} else {
   9964 		DPRINTF(WM_DEBUG_LINK,
   9965 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9966 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9967 	}
   9968 
   9969 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9970 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9971 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9972 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9973 	if (sc->sc_type == WM_T_80003) {
   9974 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9975 		case IFM_1000_T:
   9976 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9977 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9978 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9979 			break;
   9980 		default:
   9981 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9982 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9983 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9984 			break;
   9985 		}
   9986 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9987 	}
   9988 }
   9989 
   9990 /* kumeran related (80003, ICH* and PCH*) */
   9991 
   9992 /*
   9993  * wm_kmrn_readreg:
   9994  *
   9995  *	Read a kumeran register
   9996  */
   9997 static int
   9998 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9999 {
   10000 	int rv;
   10001 
   10002 	if (sc->sc_type == WM_T_80003)
   10003 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10004 	else
   10005 		rv = sc->phy.acquire(sc);
   10006 	if (rv != 0) {
   10007 		aprint_error_dev(sc->sc_dev,
   10008 		    "%s: failed to get semaphore\n", __func__);
   10009 		return 0;
   10010 	}
   10011 
   10012 	rv = wm_kmrn_readreg_locked(sc, reg);
   10013 
   10014 	if (sc->sc_type == WM_T_80003)
   10015 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10016 	else
   10017 		sc->phy.release(sc);
   10018 
   10019 	return rv;
   10020 }
   10021 
   10022 static int
   10023 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10024 {
   10025 	int rv;
   10026 
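	/*
	 * Writing the offset with the read-enable (REN) bit set starts a
	 * Kumeran read; the result is then in the low 16 bits.
	 */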
   10027 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10028 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10029 	    KUMCTRLSTA_REN);
   10030 	CSR_WRITE_FLUSH(sc);
   10031 	delay(2);
   10032 
   10033 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10034 
   10035 	return rv;
   10036 }
   10037 
   10038 /*
   10039  * wm_kmrn_writereg:
   10040  *
   10041  *	Write a kumeran register
   10042  */
   10043 static void
   10044 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10045 {
   10046 	int rv;
   10047 
   10048 	if (sc->sc_type == WM_T_80003)
   10049 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10050 	else
   10051 		rv = sc->phy.acquire(sc);
   10052 	if (rv != 0) {
   10053 		aprint_error_dev(sc->sc_dev,
   10054 		    "%s: failed to get semaphore\n", __func__);
   10055 		return;
   10056 	}
   10057 
   10058 	wm_kmrn_writereg_locked(sc, reg, val);
   10059 
   10060 	if (sc->sc_type == WM_T_80003)
   10061 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10062 	else
   10063 		sc->phy.release(sc);
   10064 }
   10065 
   10066 static void
   10067 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10068 {
   10069 
   10070 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10071 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10072 	    (val & KUMCTRLSTA_MASK));
   10073 }
   10074 
   10075 /* SGMII related */
   10076 
   10077 /*
   10078  * wm_sgmii_uses_mdio
   10079  *
   10080  * Check whether the transaction is to the internal PHY or the external
   10081  * MDIO interface. Return true if it's MDIO.
   10082  */
   10083 static bool
   10084 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10085 {
   10086 	uint32_t reg;
   10087 	bool ismdio = false;
   10088 
   10089 	switch (sc->sc_type) {
   10090 	case WM_T_82575:
   10091 	case WM_T_82576:
   10092 		reg = CSR_READ(sc, WMREG_MDIC);
   10093 		ismdio = ((reg & MDIC_DEST) != 0);
   10094 		break;
   10095 	case WM_T_82580:
   10096 	case WM_T_I350:
   10097 	case WM_T_I354:
   10098 	case WM_T_I210:
   10099 	case WM_T_I211:
   10100 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10101 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10102 		break;
   10103 	default:
   10104 		break;
   10105 	}
   10106 
   10107 	return ismdio;
   10108 }
   10109 
   10110 /*
   10111  * wm_sgmii_readreg:	[mii interface function]
   10112  *
   10113  *	Read a PHY register on the SGMII
   10114  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10116  */
   10117 static int
   10118 wm_sgmii_readreg(device_t self, int phy, int reg)
   10119 {
   10120 	struct wm_softc *sc = device_private(self);
   10121 	uint32_t i2ccmd;
   10122 	int i, rv;
   10123 
   10124 	if (sc->phy.acquire(sc)) {
   10125 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10126 		    __func__);
   10127 		return 0;
   10128 	}
   10129 
   10130 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10131 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10132 	    | I2CCMD_OPCODE_READ;
   10133 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10134 
   10135 	/* Poll the ready bit */
   10136 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10137 		delay(50);
   10138 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10139 		if (i2ccmd & I2CCMD_READY)
   10140 			break;
   10141 	}
   10142 	if ((i2ccmd & I2CCMD_READY) == 0)
   10143 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10144 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10145 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10146 
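	/* Swap the data bytes back from I2C order */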
   10147 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10148 
   10149 	sc->phy.release(sc);
   10150 	return rv;
   10151 }
   10152 
   10153 /*
   10154  * wm_sgmii_writereg:	[mii interface function]
   10155  *
   10156  *	Write a PHY register on the SGMII.
   10157  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10159  */
   10160 static void
   10161 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10162 {
   10163 	struct wm_softc *sc = device_private(self);
   10164 	uint32_t i2ccmd;
   10165 	int i;
   10166 	int val_swapped;
   10167 
   10168 	if (sc->phy.acquire(sc) != 0) {
   10169 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10170 		    __func__);
   10171 		return;
   10172 	}
   10173 	/* Swap the data bytes for the I2C interface */
   10174 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10175 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10176 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10177 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10178 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10179 
   10180 	/* Poll the ready bit */
   10181 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10182 		delay(50);
   10183 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10184 		if (i2ccmd & I2CCMD_READY)
   10185 			break;
   10186 	}
   10187 	if ((i2ccmd & I2CCMD_READY) == 0)
   10188 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10189 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10190 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10191 
   10192 	sc->phy.release(sc);
   10193 }
   10194 
   10195 /* TBI related */
   10196 
   10197 /*
   10198  * wm_tbi_mediainit:
   10199  *
   10200  *	Initialize media for use on 1000BASE-X devices.
   10201  */
   10202 static void
   10203 wm_tbi_mediainit(struct wm_softc *sc)
   10204 {
   10205 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10206 	const char *sep = "";
   10207 
   10208 	if (sc->sc_type < WM_T_82543)
   10209 		sc->sc_tipg = TIPG_WM_DFLT;
   10210 	else
   10211 		sc->sc_tipg = TIPG_LG_DFLT;
   10212 
   10213 	sc->sc_tbi_serdes_anegticks = 5;
   10214 
   10215 	/* Initialize our media structures */
   10216 	sc->sc_mii.mii_ifp = ifp;
   10217 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10218 
   10219 	if ((sc->sc_type >= WM_T_82575)
   10220 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10221 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10222 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10223 	else
   10224 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10225 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10226 
   10227 	/*
   10228 	 * SWD Pins:
   10229 	 *
   10230 	 *	0 = Link LED (output)
   10231 	 *	1 = Loss Of Signal (input)
   10232 	 */
   10233 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10234 
   10235 	/* XXX Perhaps this is only for TBI */
   10236 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10237 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10238 
   10239 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10240 		sc->sc_ctrl &= ~CTRL_LRST;
   10241 
   10242 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10243 
   10244 #define	ADD(ss, mm, dd)							\
   10245 do {									\
   10246 	aprint_normal("%s%s", sep, ss);					\
   10247 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10248 	sep = ", ";							\
   10249 } while (/*CONSTCOND*/0)
   10250 
   10251 	aprint_normal_dev(sc->sc_dev, "");
   10252 
   10253 	if (sc->sc_type == WM_T_I354) {
   10254 		uint32_t status;
   10255 
   10256 		status = CSR_READ(sc, WMREG_STATUS);
   10257 		if (((status & STATUS_2P5_SKU) != 0)
   10258 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10259 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
   10260 		} else
   10261 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
   10262 	} else if (sc->sc_type == WM_T_82545) {
   10263 		/* Only 82545 is LX (XXX except SFP) */
   10264 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10265 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10266 	} else {
   10267 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10268 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10269 	}
   10270 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10271 	aprint_normal("\n");
   10272 
   10273 #undef ADD
   10274 
   10275 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10276 }
   10277 
   10278 /*
   10279  * wm_tbi_mediachange:	[ifmedia interface function]
   10280  *
   10281  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10282  */
   10283 static int
   10284 wm_tbi_mediachange(struct ifnet *ifp)
   10285 {
   10286 	struct wm_softc *sc = ifp->if_softc;
   10287 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10288 	uint32_t status;
   10289 	int i;
   10290 
   10291 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10292 		/* XXX need some work for >= 82571 and < 82575 */
   10293 		if (sc->sc_type < WM_T_82575)
   10294 			return 0;
   10295 	}
   10296 
   10297 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10298 	    || (sc->sc_type >= WM_T_82575))
   10299 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10300 
   10301 	sc->sc_ctrl &= ~CTRL_LRST;
   10302 	sc->sc_txcw = TXCW_ANE;
   10303 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10304 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10305 	else if (ife->ifm_media & IFM_FDX)
   10306 		sc->sc_txcw |= TXCW_FD;
   10307 	else
   10308 		sc->sc_txcw |= TXCW_HD;
   10309 
   10310 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10311 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10312 
   10313 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10314 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10315 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10316 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10317 	CSR_WRITE_FLUSH(sc);
   10318 	delay(1000);
   10319 
   10320 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10321 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10322 
   10323 	/*
   10324 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   10325 	 * optics detect a signal, 0 if they don't.
   10326 	 */
   10327 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10328 		/* Have signal; wait for the link to come up. */
   10329 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10330 			delay(10000);
   10331 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10332 				break;
   10333 		}
   10334 
   10335 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10336 			    device_xname(sc->sc_dev),i));
   10337 
   10338 		status = CSR_READ(sc, WMREG_STATUS);
   10339 		DPRINTF(WM_DEBUG_LINK,
   10340 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10341 			device_xname(sc->sc_dev),status, STATUS_LU));
   10342 		if (status & STATUS_LU) {
   10343 			/* Link is up. */
   10344 			DPRINTF(WM_DEBUG_LINK,
   10345 			    ("%s: LINK: set media -> link up %s\n",
   10346 			    device_xname(sc->sc_dev),
   10347 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10348 
   10349 			/*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so we should update sc->sc_ctrl
   10352 			 */
   10353 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10354 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10355 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10356 			if (status & STATUS_FD)
   10357 				sc->sc_tctl |=
   10358 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10359 			else
   10360 				sc->sc_tctl |=
   10361 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10362 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10363 				sc->sc_fcrtl |= FCRTL_XONE;
   10364 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10365 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10366 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10367 				      sc->sc_fcrtl);
   10368 			sc->sc_tbi_linkup = 1;
   10369 		} else {
   10370 			if (i == WM_LINKUP_TIMEOUT)
   10371 				wm_check_for_link(sc);
   10372 			/* Link is down. */
   10373 			DPRINTF(WM_DEBUG_LINK,
   10374 			    ("%s: LINK: set media -> link down\n",
   10375 			    device_xname(sc->sc_dev)));
   10376 			sc->sc_tbi_linkup = 0;
   10377 		}
   10378 	} else {
   10379 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10380 		    device_xname(sc->sc_dev)));
   10381 		sc->sc_tbi_linkup = 0;
   10382 	}
   10383 
   10384 	wm_tbi_serdes_set_linkled(sc);
   10385 
   10386 	return 0;
   10387 }
   10388 
   10389 /*
   10390  * wm_tbi_mediastatus:	[ifmedia interface function]
   10391  *
   10392  *	Get the current interface media status on a 1000BASE-X device.
   10393  */
   10394 static void
   10395 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10396 {
   10397 	struct wm_softc *sc = ifp->if_softc;
   10398 	uint32_t ctrl, status;
   10399 
   10400 	ifmr->ifm_status = IFM_AVALID;
   10401 	ifmr->ifm_active = IFM_ETHER;
   10402 
   10403 	status = CSR_READ(sc, WMREG_STATUS);
   10404 	if ((status & STATUS_LU) == 0) {
   10405 		ifmr->ifm_active |= IFM_NONE;
   10406 		return;
   10407 	}
   10408 
   10409 	ifmr->ifm_status |= IFM_ACTIVE;
   10410 	/* Only 82545 is LX */
   10411 	if (sc->sc_type == WM_T_82545)
   10412 		ifmr->ifm_active |= IFM_1000_LX;
   10413 	else
   10414 		ifmr->ifm_active |= IFM_1000_SX;
   10415 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10416 		ifmr->ifm_active |= IFM_FDX;
   10417 	else
   10418 		ifmr->ifm_active |= IFM_HDX;
   10419 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10420 	if (ctrl & CTRL_RFCE)
   10421 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10422 	if (ctrl & CTRL_TFCE)
   10423 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10424 }
   10425 
   10426 /* XXX TBI only */
   10427 static int
   10428 wm_check_for_link(struct wm_softc *sc)
   10429 {
   10430 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10431 	uint32_t rxcw;
   10432 	uint32_t ctrl;
   10433 	uint32_t status;
   10434 	uint32_t sig;
   10435 
   10436 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10437 		/* XXX need some work for >= 82571 */
   10438 		if (sc->sc_type >= WM_T_82571) {
   10439 			sc->sc_tbi_linkup = 1;
   10440 			return 0;
   10441 		}
   10442 	}
   10443 
   10444 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10445 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10446 	status = CSR_READ(sc, WMREG_STATUS);
   10447 
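	/* Value of SWDPIN(1) when the optics detect a signal (0 on <= 82544) */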
   10448 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10449 
   10450 	DPRINTF(WM_DEBUG_LINK,
   10451 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10452 		device_xname(sc->sc_dev), __func__,
   10453 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10454 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10455 
   10456 	/*
   10457 	 * SWDPIN   LU RXCW
   10458 	 *      0    0    0
   10459 	 *      0    0    1	(should not happen)
   10460 	 *      0    1    0	(should not happen)
   10461 	 *      0    1    1	(should not happen)
   10462 	 *      1    0    0	Disable autonego and force linkup
   10463 	 *      1    0    1	got /C/ but not linkup yet
   10464 	 *      1    1    0	(linkup)
   10465 	 *      1    1    1	If IFM_AUTO, back to autonego
   10466 	 *
   10467 	 */
   10468 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10469 	    && ((status & STATUS_LU) == 0)
   10470 	    && ((rxcw & RXCW_C) == 0)) {
   10471 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10472 			__func__));
   10473 		sc->sc_tbi_linkup = 0;
   10474 		/* Disable auto-negotiation in the TXCW register */
   10475 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10476 
   10477 		/*
   10478 		 * Force link-up and also force full-duplex.
   10479 		 *
		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
		 * so we should update sc->sc_ctrl
   10482 		 */
   10483 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10484 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10485 	} else if (((status & STATUS_LU) != 0)
   10486 	    && ((rxcw & RXCW_C) != 0)
   10487 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10488 		sc->sc_tbi_linkup = 1;
   10489 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10490 			__func__));
   10491 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10492 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10493 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10494 	    && ((rxcw & RXCW_C) != 0)) {
   10495 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10496 	} else {
   10497 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10498 			status));
   10499 	}
   10500 
   10501 	return 0;
   10502 }
   10503 
   10504 /*
   10505  * wm_tbi_tick:
   10506  *
   10507  *	Check the link on TBI devices.
   10508  *	This function acts as mii_tick().
   10509  */
   10510 static void
   10511 wm_tbi_tick(struct wm_softc *sc)
   10512 {
   10513 	struct mii_data *mii = &sc->sc_mii;
   10514 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10515 	uint32_t status;
   10516 
   10517 	KASSERT(WM_CORE_LOCKED(sc));
   10518 
   10519 	status = CSR_READ(sc, WMREG_STATUS);
   10520 
   10521 	/* XXX is this needed? */
   10522 	(void)CSR_READ(sc, WMREG_RXCW);
   10523 	(void)CSR_READ(sc, WMREG_CTRL);
   10524 
   10525 	/* set link status */
   10526 	if ((status & STATUS_LU) == 0) {
   10527 		DPRINTF(WM_DEBUG_LINK,
   10528 		    ("%s: LINK: checklink -> down\n",
   10529 			device_xname(sc->sc_dev)));
   10530 		sc->sc_tbi_linkup = 0;
   10531 	} else if (sc->sc_tbi_linkup == 0) {
   10532 		DPRINTF(WM_DEBUG_LINK,
   10533 		    ("%s: LINK: checklink -> up %s\n",
   10534 			device_xname(sc->sc_dev),
   10535 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10536 		sc->sc_tbi_linkup = 1;
   10537 		sc->sc_tbi_serdes_ticks = 0;
   10538 	}
   10539 
   10540 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10541 		goto setled;
   10542 
   10543 	if ((status & STATUS_LU) == 0) {
   10544 		sc->sc_tbi_linkup = 0;
   10545 		/* If the timer expired, retry autonegotiation */
   10546 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10547 		    && (++sc->sc_tbi_serdes_ticks
   10548 			>= sc->sc_tbi_serdes_anegticks)) {
   10549 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10550 			sc->sc_tbi_serdes_ticks = 0;
   10551 			/*
   10552 			 * Reset the link, and let autonegotiation do
   10553 			 * its thing
   10554 			 */
   10555 			sc->sc_ctrl |= CTRL_LRST;
   10556 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10557 			CSR_WRITE_FLUSH(sc);
   10558 			delay(1000);
   10559 			sc->sc_ctrl &= ~CTRL_LRST;
   10560 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10561 			CSR_WRITE_FLUSH(sc);
   10562 			delay(1000);
   10563 			CSR_WRITE(sc, WMREG_TXCW,
   10564 			    sc->sc_txcw & ~TXCW_ANE);
   10565 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10566 		}
   10567 	}
   10568 
   10569 setled:
   10570 	wm_tbi_serdes_set_linkled(sc);
   10571 }
   10572 
   10573 /* SERDES related */
   10574 static void
   10575 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10576 {
   10577 	uint32_t reg;
   10578 
   10579 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10580 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10581 		return;
   10582 
   10583 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10584 	reg |= PCS_CFG_PCS_EN;
   10585 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10586 
   10587 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10588 	reg &= ~CTRL_EXT_SWDPIN(3);
   10589 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10590 	CSR_WRITE_FLUSH(sc);
   10591 }
   10592 
   10593 static int
   10594 wm_serdes_mediachange(struct ifnet *ifp)
   10595 {
   10596 	struct wm_softc *sc = ifp->if_softc;
   10597 	bool pcs_autoneg = true; /* XXX */
   10598 	uint32_t ctrl_ext, pcs_lctl, reg;
   10599 
   10600 	/* XXX Currently, this function is not called on 8257[12] */
   10601 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10602 	    || (sc->sc_type >= WM_T_82575))
   10603 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10604 
   10605 	wm_serdes_power_up_link_82575(sc);
   10606 
   10607 	sc->sc_ctrl |= CTRL_SLU;
   10608 
   10609 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10610 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10611 
   10612 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10613 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10614 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10615 	case CTRL_EXT_LINK_MODE_SGMII:
   10616 		pcs_autoneg = true;
   10617 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10618 		break;
   10619 	case CTRL_EXT_LINK_MODE_1000KX:
   10620 		pcs_autoneg = false;
   10621 		/* FALLTHROUGH */
   10622 	default:
   10623 		if ((sc->sc_type == WM_T_82575)
   10624 		    || (sc->sc_type == WM_T_82576)) {
   10625 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10626 				pcs_autoneg = false;
   10627 		}
   10628 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10629 		    | CTRL_FRCFDX;
   10630 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10631 	}
   10632 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10633 
   10634 	if (pcs_autoneg) {
   10635 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10636 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10637 
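		/* Advertise both symmetric and asymmetric pause */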
   10638 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10639 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10640 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10641 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10642 	} else
   10643 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10644 
   10645 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10646 
   10648 	return 0;
   10649 }
   10650 
   10651 static void
   10652 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10653 {
   10654 	struct wm_softc *sc = ifp->if_softc;
   10655 	struct mii_data *mii = &sc->sc_mii;
   10656 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10657 	uint32_t pcs_adv, pcs_lpab, reg;
   10658 
   10659 	ifmr->ifm_status = IFM_AVALID;
   10660 	ifmr->ifm_active = IFM_ETHER;
   10661 
   10662 	/* Check PCS */
   10663 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10664 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10665 		ifmr->ifm_active |= IFM_NONE;
   10666 		sc->sc_tbi_linkup = 0;
   10667 		goto setled;
   10668 	}
   10669 
   10670 	sc->sc_tbi_linkup = 1;
   10671 	ifmr->ifm_status |= IFM_ACTIVE;
   10672 	if (sc->sc_type == WM_T_I354) {
   10673 		uint32_t status;
   10674 
   10675 		status = CSR_READ(sc, WMREG_STATUS);
   10676 		if (((status & STATUS_2P5_SKU) != 0)
   10677 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10678 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10679 		} else
   10680 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10681 	} else {
   10682 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10683 		case PCS_LSTS_SPEED_10:
   10684 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10685 			break;
   10686 		case PCS_LSTS_SPEED_100:
   10687 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10688 			break;
   10689 		case PCS_LSTS_SPEED_1000:
   10690 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10691 			break;
   10692 		default:
   10693 			device_printf(sc->sc_dev, "Unknown speed\n");
   10694 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10695 			break;
   10696 		}
   10697 	}
   10698 	if ((reg & PCS_LSTS_FDX) != 0)
   10699 		ifmr->ifm_active |= IFM_FDX;
   10700 	else
   10701 		ifmr->ifm_active |= IFM_HDX;
   10702 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10703 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10704 		/* Check flow */
   10705 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10706 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10707 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10708 			goto setled;
   10709 		}
   10710 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10711 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10712 		DPRINTF(WM_DEBUG_LINK,
   10713 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10714 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10715 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10716 			mii->mii_media_active |= IFM_FLOW
   10717 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10718 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10719 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10720 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10721 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10722 			mii->mii_media_active |= IFM_FLOW
   10723 			    | IFM_ETH_TXPAUSE;
   10724 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10725 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10726 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10727 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10728 			mii->mii_media_active |= IFM_FLOW
   10729 			    | IFM_ETH_RXPAUSE;
   10730 		}
   10731 	}
   10732 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10733 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10734 setled:
   10735 	wm_tbi_serdes_set_linkled(sc);
   10736 }
   10737 
   10738 /*
   10739  * wm_serdes_tick:
   10740  *
   10741  *	Check the link on serdes devices.
   10742  */
   10743 static void
   10744 wm_serdes_tick(struct wm_softc *sc)
   10745 {
   10746 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10747 	struct mii_data *mii = &sc->sc_mii;
   10748 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10749 	uint32_t reg;
   10750 
   10751 	KASSERT(WM_CORE_LOCKED(sc));
   10752 
   10753 	mii->mii_media_status = IFM_AVALID;
   10754 	mii->mii_media_active = IFM_ETHER;
   10755 
   10756 	/* Check PCS */
   10757 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10758 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10759 		mii->mii_media_status |= IFM_ACTIVE;
   10760 		sc->sc_tbi_linkup = 1;
   10761 		sc->sc_tbi_serdes_ticks = 0;
   10762 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10763 		if ((reg & PCS_LSTS_FDX) != 0)
   10764 			mii->mii_media_active |= IFM_FDX;
   10765 		else
   10766 			mii->mii_media_active |= IFM_HDX;
   10767 	} else {
   10768 		mii->mii_media_status |= IFM_NONE;
   10769 		sc->sc_tbi_linkup = 0;
   10770 		/* If the timer expired, retry autonegotiation */
   10771 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10772 		    && (++sc->sc_tbi_serdes_ticks
   10773 			>= sc->sc_tbi_serdes_anegticks)) {
   10774 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10775 			sc->sc_tbi_serdes_ticks = 0;
   10776 			/* XXX */
   10777 			wm_serdes_mediachange(ifp);
   10778 		}
   10779 	}
   10780 
   10781 	wm_tbi_serdes_set_linkled(sc);
   10782 }
   10783 
   10784 /* SFP related */
   10785 
   10786 static int
   10787 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10788 {
   10789 	uint32_t i2ccmd;
   10790 	int i;
   10791 
   10792 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10793 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10794 
   10795 	/* Poll the ready bit */
   10796 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10797 		delay(50);
   10798 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10799 		if (i2ccmd & I2CCMD_READY)
   10800 			break;
   10801 	}
   10802 	if ((i2ccmd & I2CCMD_READY) == 0)
   10803 		return -1;
   10804 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10805 		return -1;
   10806 
   10807 	*data = i2ccmd & 0x00ff;
   10808 
   10809 	return 0;
   10810 }
   10811 
   10812 static uint32_t
   10813 wm_sfp_get_media_type(struct wm_softc *sc)
   10814 {
   10815 	uint32_t ctrl_ext;
   10816 	uint8_t val = 0;
   10817 	int timeout = 3;
   10818 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10819 	int rv = -1;
   10820 
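	/* Enable the I2C interface so that the SFP ROM can be read */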
   10821 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10822 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10823 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10824 	CSR_WRITE_FLUSH(sc);
   10825 
   10826 	/* Read SFP module data */
   10827 	while (timeout) {
   10828 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10829 		if (rv == 0)
   10830 			break;
   10831 		delay(100*1000); /* XXX too big */
   10832 		timeout--;
   10833 	}
   10834 	if (rv != 0)
   10835 		goto out;
   10836 	switch (val) {
   10837 	case SFF_SFP_ID_SFF:
   10838 		aprint_normal_dev(sc->sc_dev,
   10839 		    "Module/Connector soldered to board\n");
   10840 		break;
   10841 	case SFF_SFP_ID_SFP:
   10842 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10843 		break;
   10844 	case SFF_SFP_ID_UNKNOWN:
   10845 		goto out;
   10846 	default:
   10847 		break;
   10848 	}
   10849 
   10850 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10851 	if (rv != 0) {
   10852 		goto out;
   10853 	}
   10854 
   10855 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10856 		mediatype = WM_MEDIATYPE_SERDES;
   10857 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   10858 		sc->sc_flags |= WM_F_SGMII;
   10859 		mediatype = WM_MEDIATYPE_COPPER;
   10860 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   10861 		sc->sc_flags |= WM_F_SGMII;
   10862 		mediatype = WM_MEDIATYPE_SERDES;
   10863 	}
   10864 
   10865 out:
   10866 	/* Restore I2C interface setting */
   10867 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10868 
   10869 	return mediatype;
   10870 }
   10871 
   10872 /*
   10873  * NVM related.
   10874  * Microwire, SPI (w/wo EERD) and Flash.
   10875  */
   10876 
   10877 /* Both spi and uwire */
   10878 
   10879 /*
   10880  * wm_eeprom_sendbits:
   10881  *
   10882  *	Send a series of bits to the EEPROM.
   10883  */
   10884 static void
   10885 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10886 {
   10887 	uint32_t reg;
   10888 	int x;
   10889 
   10890 	reg = CSR_READ(sc, WMREG_EECD);
   10891 
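	/* Bit-bang each bit MSB first: present it on DI, then pulse SK */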
   10892 	for (x = nbits; x > 0; x--) {
   10893 		if (bits & (1U << (x - 1)))
   10894 			reg |= EECD_DI;
   10895 		else
   10896 			reg &= ~EECD_DI;
   10897 		CSR_WRITE(sc, WMREG_EECD, reg);
   10898 		CSR_WRITE_FLUSH(sc);
   10899 		delay(2);
   10900 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10901 		CSR_WRITE_FLUSH(sc);
   10902 		delay(2);
   10903 		CSR_WRITE(sc, WMREG_EECD, reg);
   10904 		CSR_WRITE_FLUSH(sc);
   10905 		delay(2);
   10906 	}
   10907 }
   10908 
   10909 /*
   10910  * wm_eeprom_recvbits:
   10911  *
   10912  *	Receive a series of bits from the EEPROM.
   10913  */
   10914 static void
   10915 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10916 {
   10917 	uint32_t reg, val;
   10918 	int x;
   10919 
   10920 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10921 
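	/* Raise SK, sample DO, then lower SK; bits arrive MSB first */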
   10922 	val = 0;
   10923 	for (x = nbits; x > 0; x--) {
   10924 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10925 		CSR_WRITE_FLUSH(sc);
   10926 		delay(2);
   10927 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10928 			val |= (1U << (x - 1));
   10929 		CSR_WRITE(sc, WMREG_EECD, reg);
   10930 		CSR_WRITE_FLUSH(sc);
   10931 		delay(2);
   10932 	}
   10933 	*valp = val;
   10934 }
   10935 
   10936 /* Microwire */
   10937 
   10938 /*
   10939  * wm_nvm_read_uwire:
   10940  *
   10941  *	Read a word from the EEPROM using the MicroWire protocol.
   10942  */
   10943 static int
   10944 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10945 {
   10946 	uint32_t reg, val;
   10947 	int i;
   10948 
   10949 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10950 		device_xname(sc->sc_dev), __func__));
   10951 
   10952 	for (i = 0; i < wordcnt; i++) {
   10953 		/* Clear SK and DI. */
   10954 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10955 		CSR_WRITE(sc, WMREG_EECD, reg);
   10956 
   10957 		/*
   10958 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10959 		 * and Xen.
   10960 		 *
   10961 		 * We use this workaround only for 82540 because qemu's
   10962 		 * e1000 act as 82540.
   10963 		 */
   10964 		if (sc->sc_type == WM_T_82540) {
   10965 			reg |= EECD_SK;
   10966 			CSR_WRITE(sc, WMREG_EECD, reg);
   10967 			reg &= ~EECD_SK;
   10968 			CSR_WRITE(sc, WMREG_EECD, reg);
   10969 			CSR_WRITE_FLUSH(sc);
   10970 			delay(2);
   10971 		}
   10972 		/* XXX: end of workaround */
   10973 
   10974 		/* Set CHIP SELECT. */
   10975 		reg |= EECD_CS;
   10976 		CSR_WRITE(sc, WMREG_EECD, reg);
   10977 		CSR_WRITE_FLUSH(sc);
   10978 		delay(2);
   10979 
   10980 		/* Shift in the READ command. */
   10981 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10982 
   10983 		/* Shift in address. */
   10984 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10985 
   10986 		/* Shift out the data. */
   10987 		wm_eeprom_recvbits(sc, &val, 16);
   10988 		data[i] = val & 0xffff;
   10989 
   10990 		/* Clear CHIP SELECT. */
   10991 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10992 		CSR_WRITE(sc, WMREG_EECD, reg);
   10993 		CSR_WRITE_FLUSH(sc);
   10994 		delay(2);
   10995 	}
   10996 
   10997 	return 0;
   10998 }
   10999 
   11000 /* SPI */
   11001 
   11002 /*
   11003  * Set SPI and FLASH related information from the EECD register.
   11004  * For 82541 and 82547, the word size is taken from EEPROM.
   11005  */
   11006 static int
   11007 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11008 {
   11009 	int size;
   11010 	uint32_t reg;
   11011 	uint16_t data;
   11012 
   11013 	reg = CSR_READ(sc, WMREG_EECD);
   11014 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11015 
   11016 	/* Read the size of NVM from EECD by default */
   11017 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11018 	switch (sc->sc_type) {
   11019 	case WM_T_82541:
   11020 	case WM_T_82541_2:
   11021 	case WM_T_82547:
   11022 	case WM_T_82547_2:
   11023 		/* Set dummy value to access EEPROM */
   11024 		sc->sc_nvm_wordsize = 64;
   11025 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11026 		reg = data;
   11027 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11028 		if (size == 0)
   11029 			size = 6; /* 64 word size */
   11030 		else
   11031 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11032 		break;
   11033 	case WM_T_80003:
   11034 	case WM_T_82571:
   11035 	case WM_T_82572:
   11036 	case WM_T_82573: /* SPI case */
   11037 	case WM_T_82574: /* SPI case */
   11038 	case WM_T_82583: /* SPI case */
   11039 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11040 		if (size > 14)
   11041 			size = 14;
   11042 		break;
   11043 	case WM_T_82575:
   11044 	case WM_T_82576:
   11045 	case WM_T_82580:
   11046 	case WM_T_I350:
   11047 	case WM_T_I354:
   11048 	case WM_T_I210:
   11049 	case WM_T_I211:
   11050 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11051 		if (size > 15)
   11052 			size = 15;
   11053 		break;
   11054 	default:
   11055 		aprint_error_dev(sc->sc_dev,
   11056 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11057 		return -1;
   11059 	}
   11060 
   11061 	sc->sc_nvm_wordsize = 1 << size;
   11062 
   11063 	return 0;
   11064 }
   11065 
   11066 /*
   11067  * wm_nvm_ready_spi:
   11068  *
   11069  *	Wait for a SPI EEPROM to be ready for commands.
   11070  */
   11071 static int
   11072 wm_nvm_ready_spi(struct wm_softc *sc)
   11073 {
   11074 	uint32_t val;
   11075 	int usec;
   11076 
   11077 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11078 		device_xname(sc->sc_dev), __func__));
   11079 
   11080 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11081 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11082 		wm_eeprom_recvbits(sc, &val, 8);
   11083 		if ((val & SPI_SR_RDY) == 0)
   11084 			break;
   11085 	}
   11086 	if (usec >= SPI_MAX_RETRIES) {
   11087 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   11088 		return 1;
   11089 	}
   11090 	return 0;
   11091 }
   11092 
   11093 /*
   11094  * wm_nvm_read_spi:
   11095  *
 *	Read a word from the EEPROM using the SPI protocol.
   11097  */
   11098 static int
   11099 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11100 {
   11101 	uint32_t reg, val;
   11102 	int i;
   11103 	uint8_t opc;
   11104 
   11105 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11106 		device_xname(sc->sc_dev), __func__));
   11107 
   11108 	/* Clear SK and CS. */
   11109 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11110 	CSR_WRITE(sc, WMREG_EECD, reg);
   11111 	CSR_WRITE_FLUSH(sc);
   11112 	delay(2);
   11113 
   11114 	if (wm_nvm_ready_spi(sc))
   11115 		return 1;
   11116 
   11117 	/* Toggle CS to flush commands. */
   11118 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11119 	CSR_WRITE_FLUSH(sc);
   11120 	delay(2);
   11121 	CSR_WRITE(sc, WMREG_EECD, reg);
   11122 	CSR_WRITE_FLUSH(sc);
   11123 	delay(2);
   11124 
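	/*
	 * SPI EEPROMs with 8 address bits carry the 9th address bit (A8)
	 * in the opcode, so set it when reading the upper half.
	 */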
   11125 	opc = SPI_OPC_READ;
   11126 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11127 		opc |= SPI_OPC_A8;
   11128 
   11129 	wm_eeprom_sendbits(sc, opc, 8);
   11130 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11131 
   11132 	for (i = 0; i < wordcnt; i++) {
   11133 		wm_eeprom_recvbits(sc, &val, 16);
   11134 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11135 	}
   11136 
   11137 	/* Raise CS and clear SK. */
   11138 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11139 	CSR_WRITE(sc, WMREG_EECD, reg);
   11140 	CSR_WRITE_FLUSH(sc);
   11141 	delay(2);
   11142 
   11143 	return 0;
   11144 }
   11145 
   11146 /* Using with EERD */
   11147 
   11148 static int
   11149 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11150 {
   11151 	uint32_t attempts = 100000;
   11152 	uint32_t i, reg = 0;
   11153 	int32_t done = -1;
   11154 
   11155 	for (i = 0; i < attempts; i++) {
   11156 		reg = CSR_READ(sc, rw);
   11157 
   11158 		if (reg & EERD_DONE) {
   11159 			done = 0;
   11160 			break;
   11161 		}
   11162 		delay(5);
   11163 	}
   11164 
   11165 	return done;
   11166 }
   11167 
   11168 static int
   11169 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11170     uint16_t *data)
   11171 {
   11172 	int i, eerd = 0;
   11173 	int error = 0;
   11174 
   11175 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11176 		device_xname(sc->sc_dev), __func__));
   11177 
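	/* For each word, start a read via EERD, poll DONE, then fetch data */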
   11178 	for (i = 0; i < wordcnt; i++) {
   11179 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11180 
   11181 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11182 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11183 		if (error != 0)
   11184 			break;
   11185 
   11186 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11187 	}
   11188 
   11189 	return error;
   11190 }
   11191 
   11192 /* Flash */
   11193 
   11194 static int
   11195 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11196 {
   11197 	uint32_t eecd;
   11198 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11199 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11200 	uint8_t sig_byte = 0;
   11201 
   11202 	switch (sc->sc_type) {
   11203 	case WM_T_PCH_SPT:
   11204 		/*
   11205 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11206 		 * sector valid bits from the NVM.
   11207 		 */
   11208 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11209 		if ((*bank == 0) || (*bank == 1)) {
   11210 			aprint_error_dev(sc->sc_dev,
   11211 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11212 				*bank);
   11213 			return -1;
   11214 		} else {
   11215 			*bank = *bank - 2;
   11216 			return 0;
   11217 		}
   11218 	case WM_T_ICH8:
   11219 	case WM_T_ICH9:
   11220 		eecd = CSR_READ(sc, WMREG_EECD);
   11221 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11222 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11223 			return 0;
   11224 		}
   11225 		/* FALLTHROUGH */
   11226 	default:
   11227 		/* Default to 0 */
   11228 		*bank = 0;
   11229 
   11230 		/* Check bank 0 */
   11231 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11232 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11233 			*bank = 0;
   11234 			return 0;
   11235 		}
   11236 
   11237 		/* Check bank 1 */
   11238 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11239 		    &sig_byte);
   11240 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11241 			*bank = 1;
   11242 			return 0;
   11243 		}
   11244 	}
   11245 
   11246 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11247 		device_xname(sc->sc_dev)));
   11248 	return -1;
   11249 }
   11250 
   11251 /******************************************************************************
   11252  * This function does initial flash setup so that a new read/write/erase cycle
   11253  * can be started.
   11254  *
   11255  * sc - The pointer to the hw structure
   11256  ****************************************************************************/
   11257 static int32_t
   11258 wm_ich8_cycle_init(struct wm_softc *sc)
   11259 {
   11260 	uint16_t hsfsts;
   11261 	int32_t error = 1;
   11262 	int32_t i     = 0;
   11263 
   11264 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11265 
	/* Maybe check the Flash Descriptor Valid bit in HW status */
   11267 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11268 		return error;
   11269 	}
   11270 
	/* Clear FCERR and DAEL in HW status by writing 1 to them */
   11273 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11274 
   11275 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11276 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after hardware
	 * reset, which could then be used to tell whether a cycle is in
	 * progress or has been completed. We should also have some software
	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
	 * so that accesses by two threads are serialized, or some way to
	 * keep two threads from starting a cycle at the same time.
	 */
   11287 
   11288 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11289 		/*
   11290 		 * There is no cycle running at present, so we can start a
   11291 		 * cycle
   11292 		 */
   11293 
   11294 		/* Begin by setting Flash Cycle Done. */
   11295 		hsfsts |= HSFSTS_DONE;
   11296 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11297 		error = 0;
   11298 	} else {
   11299 		/*
   11300 		 * otherwise poll for sometime so the current cycle has a
   11301 		 * chance to end before giving up.
   11302 		 */
   11303 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11304 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11305 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11306 				error = 0;
   11307 				break;
   11308 			}
   11309 			delay(1);
   11310 		}
   11311 		if (error == 0) {
   11312 			/*
			 * Successfully waited for the previous cycle to
			 * finish; now set the Flash Cycle Done.
   11315 			 */
   11316 			hsfsts |= HSFSTS_DONE;
   11317 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11318 		}
   11319 	}
   11320 	return error;
   11321 }
   11322 
   11323 /******************************************************************************
   11324  * This function starts a flash cycle and waits for its completion
   11325  *
   11326  * sc - The pointer to the hw structure
   11327  ****************************************************************************/
   11328 static int32_t
   11329 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11330 {
   11331 	uint16_t hsflctl;
   11332 	uint16_t hsfsts;
   11333 	int32_t error = 1;
   11334 	uint32_t i = 0;
   11335 
   11336 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11337 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11338 	hsflctl |= HSFCTL_GO;
   11339 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11340 
    11341 	/* Wait until the FDONE bit is set to 1 */
   11342 	do {
   11343 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11344 		if (hsfsts & HSFSTS_DONE)
   11345 			break;
   11346 		delay(1);
   11347 		i++;
   11348 	} while (i < timeout);
    11349 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   11350 		error = 0;
   11351 
   11352 	return error;
   11353 }
   11354 
   11355 /******************************************************************************
    11356  * Reads a byte, word or dword from the NVM using the ICH8 flash access registers.
   11357  *
   11358  * sc - The pointer to the hw structure
   11359  * index - The index of the byte or word to read.
    11360  * size - Size of data to read: 1=byte, 2=word, 4=dword
   11361  * data - Pointer to the word to store the value read.
   11362  *****************************************************************************/
   11363 static int32_t
   11364 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11365     uint32_t size, uint32_t *data)
   11366 {
   11367 	uint16_t hsfsts;
   11368 	uint16_t hsflctl;
   11369 	uint32_t flash_linear_address;
   11370 	uint32_t flash_data = 0;
   11371 	int32_t error = 1;
   11372 	int32_t count = 0;
   11373 
    11374 	if (size < 1 || size > 4 || data == NULL ||
   11375 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11376 		return error;
   11377 
   11378 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11379 	    sc->sc_ich8_flash_base;
   11380 
   11381 	do {
   11382 		delay(1);
   11383 		/* Steps */
   11384 		error = wm_ich8_cycle_init(sc);
   11385 		if (error)
   11386 			break;
   11387 
   11388 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11389 		/* The byte-count field holds (size - 1): 0/1/3 for 1/2/4 bytes */
   11390 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11391 		    & HSFCTL_BCOUNT_MASK;
   11392 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11393 		if (sc->sc_type == WM_T_PCH_SPT) {
   11394 			/*
    11395 			 * In SPT, this register is in the LAN memory space,
    11396 			 * not flash.  Therefore only 32 bit access is supported.
   11397 			 */
   11398 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11399 			    (uint32_t)hsflctl);
   11400 		} else
   11401 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11402 
   11403 		/*
   11404 		 * Write the last 24 bits of index into Flash Linear address
   11405 		 * field in Flash Address
   11406 		 */
    11407 		/* TODO: maybe check the index against the size of the flash */
   11408 
   11409 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11410 
   11411 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11412 
    11413 		/*
    11414 		 * Check whether FCERR is set.  If it is, clear it and
    11415 		 * retry the whole sequence a few more times; otherwise
    11416 		 * read the Flash Data0 register, least significant byte
    11417 		 * first.
    11418 		 */
   11419 		if (error == 0) {
   11420 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11421 			if (size == 1)
   11422 				*data = (uint8_t)(flash_data & 0x000000FF);
   11423 			else if (size == 2)
   11424 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11425 			else if (size == 4)
   11426 				*data = (uint32_t)flash_data;
   11427 			break;
   11428 		} else {
    11429 			/*
    11430 			 * If we've gotten here, then things are probably
    11431 			 * completely hosed, but if the error condition is
    11432 			 * detected, it won't hurt to retry, up to
    11433 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
    11434 			 */
   11435 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11436 			if (hsfsts & HSFSTS_ERR) {
   11437 				/* Repeat for some time before giving up. */
   11438 				continue;
   11439 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11440 				break;
   11441 		}
   11442 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11443 
   11444 	return error;
   11445 }
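
/*
 * Note on the retry loop above: when a cycle fails with FCERR set, the
 * whole sequence is retried from wm_ich8_cycle_init() onward, up to
 * ICH_FLASH_CYCLE_REPEAT_COUNT times; a timeout with FDONE still clear
 * gives up immediately.
 */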
   11446 
   11447 /******************************************************************************
   11448  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11449  *
    11450  * sc - pointer to the wm_softc structure
   11451  * index - The index of the byte to read.
   11452  * data - Pointer to a byte to store the value read.
   11453  *****************************************************************************/
   11454 static int32_t
   11455 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11456 {
   11457 	int32_t status;
   11458 	uint32_t word = 0;
   11459 
   11460 	status = wm_read_ich8_data(sc, index, 1, &word);
   11461 	if (status == 0)
   11462 		*data = (uint8_t)word;
   11463 	else
   11464 		*data = 0;
   11465 
   11466 	return status;
   11467 }
   11468 
   11469 /******************************************************************************
   11470  * Reads a word from the NVM using the ICH8 flash access registers.
   11471  *
    11472  * sc - pointer to the wm_softc structure
   11473  * index - The starting byte index of the word to read.
   11474  * data - Pointer to a word to store the value read.
   11475  *****************************************************************************/
   11476 static int32_t
   11477 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11478 {
   11479 	int32_t status;
   11480 	uint32_t word = 0;
   11481 
   11482 	status = wm_read_ich8_data(sc, index, 2, &word);
   11483 	if (status == 0)
   11484 		*data = (uint16_t)word;
   11485 	else
   11486 		*data = 0;
   11487 
   11488 	return status;
   11489 }
   11490 
   11491 /******************************************************************************
   11492  * Reads a dword from the NVM using the ICH8 flash access registers.
   11493  *
    11494  * sc - pointer to the wm_softc structure
    11495  * index - The starting byte index of the dword to read.
    11496  * data - Pointer to a dword to store the value read.
   11497  *****************************************************************************/
   11498 static int32_t
   11499 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11500 {
   11501 	int32_t status;
   11502 
   11503 	status = wm_read_ich8_data(sc, index, 4, data);
   11504 	return status;
   11505 }
   11506 
   11507 /******************************************************************************
   11508  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11509  * register.
   11510  *
   11511  * sc - Struct containing variables accessed by shared code
   11512  * offset - offset of word in the EEPROM to read
    11513  * words - number of words to read
    11514  * data - words read from the EEPROM
   11515  *****************************************************************************/
   11516 static int
   11517 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11518 {
   11519 	int32_t  error = 0;
   11520 	uint32_t flash_bank = 0;
   11521 	uint32_t act_offset = 0;
   11522 	uint32_t bank_offset = 0;
   11523 	uint16_t word = 0;
   11524 	uint16_t i = 0;
   11525 
   11526 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11527 		device_xname(sc->sc_dev), __func__));
   11528 
   11529 	/*
   11530 	 * We need to know which is the valid flash bank.  In the event
   11531 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11532 	 * managing flash_bank.  So it cannot be trusted and needs
   11533 	 * to be updated with each read.
   11534 	 */
   11535 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11536 	if (error) {
   11537 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11538 			device_xname(sc->sc_dev)));
   11539 		flash_bank = 0;
   11540 	}
   11541 
   11542 	/*
    11543 	 * If we're on bank 1, adjust the offset: the bank size is counted
    11544 	 * in words, but flash offsets are in bytes.
   11545 	 */
   11546 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11547 
   11548 	error = wm_get_swfwhw_semaphore(sc);
   11549 	if (error) {
   11550 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11551 		    __func__);
   11552 		return error;
   11553 	}
   11554 
   11555 	for (i = 0; i < words; i++) {
   11556 		/* The NVM part needs a byte offset, hence * 2 */
   11557 		act_offset = bank_offset + ((offset + i) * 2);
   11558 		error = wm_read_ich8_word(sc, act_offset, &word);
   11559 		if (error) {
   11560 			aprint_error_dev(sc->sc_dev,
   11561 			    "%s: failed to read NVM\n", __func__);
   11562 			break;
   11563 		}
   11564 		data[i] = word;
   11565 	}
   11566 
   11567 	wm_put_swfwhw_semaphore(sc);
   11568 	return error;
   11569 }
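
/*
 * Worked example for the offset math above, assuming a flash bank size
 * of 2048 words (sc_ich8_flash_bank_size == 2048): reading NVM word
 * 0x10 from bank 1 gives bank_offset = 2048 * 2 = 0x1000 bytes and
 * act_offset = 0x1000 + 0x10 * 2 = 0x1020 bytes.
 */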
   11570 
   11571 /******************************************************************************
   11572  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11573  * register.
   11574  *
   11575  * sc - Struct containing variables accessed by shared code
   11576  * offset - offset of word in the EEPROM to read
    11577  * words - number of words to read
    11578  * data - words read from the EEPROM
   11579  *****************************************************************************/
   11580 static int
   11581 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11582 {
   11583 	int32_t  error = 0;
   11584 	uint32_t flash_bank = 0;
   11585 	uint32_t act_offset = 0;
   11586 	uint32_t bank_offset = 0;
   11587 	uint32_t dword = 0;
   11588 	uint16_t i = 0;
   11589 
   11590 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11591 		device_xname(sc->sc_dev), __func__));
   11592 
   11593 	/*
   11594 	 * We need to know which is the valid flash bank.  In the event
   11595 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11596 	 * managing flash_bank.  So it cannot be trusted and needs
   11597 	 * to be updated with each read.
   11598 	 */
   11599 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11600 	if (error) {
   11601 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11602 			device_xname(sc->sc_dev)));
   11603 		flash_bank = 0;
   11604 	}
   11605 
   11606 	/*
    11607 	 * If we're on bank 1, adjust the offset: the bank size is counted
    11608 	 * in words, but flash offsets are in bytes.
   11609 	 */
   11610 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11611 
   11612 	error = wm_get_swfwhw_semaphore(sc);
   11613 	if (error) {
   11614 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11615 		    __func__);
   11616 		return error;
   11617 	}
   11618 
   11619 	for (i = 0; i < words; i++) {
   11620 		/* The NVM part needs a byte offset, hence * 2 */
   11621 		act_offset = bank_offset + ((offset + i) * 2);
   11622 		/* but we must read dword aligned, so mask ... */
   11623 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11624 		if (error) {
   11625 			aprint_error_dev(sc->sc_dev,
   11626 			    "%s: failed to read NVM\n", __func__);
   11627 			break;
   11628 		}
   11629 		/* ... and pick out low or high word */
   11630 		if ((act_offset & 0x2) == 0)
   11631 			data[i] = (uint16_t)(dword & 0xFFFF);
   11632 		else
   11633 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11634 	}
   11635 
   11636 	wm_put_swfwhw_semaphore(sc);
   11637 	return error;
   11638 }
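
/*
 * Worked example for the alignment logic above: for act_offset 0x1022
 * the dword at 0x1020 (act_offset & ~0x3) is read, and because
 * (act_offset & 0x2) != 0 the high 16 bits are returned; for
 * act_offset 0x1020 the low 16 bits would be returned instead.
 */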
   11639 
   11640 /* iNVM */
   11641 
   11642 static int
   11643 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11644 {
    11645 	int32_t  rv = -1;	/* -1 means "address not found" */
   11646 	uint32_t invm_dword;
   11647 	uint16_t i;
   11648 	uint8_t record_type, word_address;
   11649 
   11650 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11651 		device_xname(sc->sc_dev), __func__));
   11652 
   11653 	for (i = 0; i < INVM_SIZE; i++) {
   11654 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11655 		/* Get record type */
   11656 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11657 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11658 			break;
   11659 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11660 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11661 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11662 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11663 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11664 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11665 			if (word_address == address) {
   11666 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11667 				rv = 0;
   11668 				break;
   11669 			}
   11670 		}
   11671 	}
   11672 
   11673 	return rv;
   11674 }
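
/*
 * The scan above walks the iNVM as a sequence of 32-bit records: an
 * uninitialized record terminates the scan, CSR- and RSA-autoload
 * structures are skipped by advancing i past their payload, and a
 * word-autoload record whose address matches yields the 16-bit data.
 */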
   11675 
   11676 static int
   11677 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11678 {
   11679 	int rv = 0;
   11680 	int i;
   11681 
   11682 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11683 		device_xname(sc->sc_dev), __func__));
   11684 
   11685 	for (i = 0; i < words; i++) {
   11686 		switch (offset + i) {
   11687 		case NVM_OFF_MACADDR:
   11688 		case NVM_OFF_MACADDR1:
   11689 		case NVM_OFF_MACADDR2:
   11690 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11691 			if (rv != 0) {
   11692 				data[i] = 0xffff;
   11693 				rv = -1;
   11694 			}
   11695 			break;
    11696 		case NVM_OFF_CFG2:
    11697 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    11698 			if (rv != 0) {
    11699 				data[i] = NVM_INIT_CTRL_2_DEFAULT_I211;
    11700 				rv = 0;
    11701 			}
    11702 			break;
    11703 		case NVM_OFF_CFG4:
    11704 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    11705 			if (rv != 0) {
    11706 				data[i] = NVM_INIT_CTRL_4_DEFAULT_I211;
    11707 				rv = 0;
    11708 			}
    11709 			break;
    11710 		case NVM_OFF_LED_1_CFG:
    11711 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    11712 			if (rv != 0) {
    11713 				data[i] = NVM_LED_1_CFG_DEFAULT_I211;
    11714 				rv = 0;
    11715 			}
    11716 			break;
    11717 		case NVM_OFF_LED_0_2_CFG:
    11718 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    11719 			if (rv != 0) {
    11720 				data[i] = NVM_LED_0_2_CFG_DEFAULT_I211;
    11721 				rv = 0;
    11722 			}
    11723 			break;
    11724 		case NVM_OFF_ID_LED_SETTINGS:
    11725 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    11726 			if (rv != 0) {
    11727 				data[i] = ID_LED_RESERVED_FFFF;
    11728 				rv = 0;
    11729 			}
    11730 			break;
    11731 		default:
    11732 			DPRINTF(WM_DEBUG_NVM,
    11733 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
    11734 			data[i] = NVM_RESERVED_WORD;
    11735 			break;
   11736 		}
   11737 	}
   11738 
   11739 	return rv;
   11740 }
   11741 
    11742 /* Locking, NVM type detection, checksum validation, version check and read */
   11743 
   11744 /*
   11745  * wm_nvm_acquire:
   11746  *
   11747  *	Perform the EEPROM handshake required on some chips.
   11748  */
   11749 static int
   11750 wm_nvm_acquire(struct wm_softc *sc)
   11751 {
   11752 	uint32_t reg;
   11753 	int x;
   11754 	int ret = 0;
   11755 
   11756 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11757 		device_xname(sc->sc_dev), __func__));
   11758 
   11759 	if (sc->sc_type >= WM_T_ICH8) {
   11760 		ret = wm_get_nvm_ich8lan(sc);
   11761 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11762 		ret = wm_get_swfwhw_semaphore(sc);
   11763 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11764 		/* This will also do wm_get_swsm_semaphore() if needed */
   11765 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11766 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11767 		ret = wm_get_swsm_semaphore(sc);
   11768 	}
   11769 
   11770 	if (ret) {
   11771 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11772 			__func__);
   11773 		return 1;
   11774 	}
   11775 
   11776 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11777 		reg = CSR_READ(sc, WMREG_EECD);
   11778 
   11779 		/* Request EEPROM access. */
   11780 		reg |= EECD_EE_REQ;
   11781 		CSR_WRITE(sc, WMREG_EECD, reg);
   11782 
    11783 		/* ...and wait for it to be granted. */
   11784 		for (x = 0; x < 1000; x++) {
   11785 			reg = CSR_READ(sc, WMREG_EECD);
   11786 			if (reg & EECD_EE_GNT)
   11787 				break;
   11788 			delay(5);
   11789 		}
   11790 		if ((reg & EECD_EE_GNT) == 0) {
   11791 			aprint_error_dev(sc->sc_dev,
   11792 			    "could not acquire EEPROM GNT\n");
   11793 			reg &= ~EECD_EE_REQ;
   11794 			CSR_WRITE(sc, WMREG_EECD, reg);
   11795 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11796 				wm_put_swfwhw_semaphore(sc);
   11797 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11798 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11799 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11800 				wm_put_swsm_semaphore(sc);
   11801 			return 1;
   11802 		}
   11803 	}
   11804 
   11805 	return 0;
   11806 }
   11807 
   11808 /*
   11809  * wm_nvm_release:
   11810  *
   11811  *	Release the EEPROM mutex.
   11812  */
   11813 static void
   11814 wm_nvm_release(struct wm_softc *sc)
   11815 {
   11816 	uint32_t reg;
   11817 
   11818 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11819 		device_xname(sc->sc_dev), __func__));
   11820 
   11821 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11822 		reg = CSR_READ(sc, WMREG_EECD);
   11823 		reg &= ~EECD_EE_REQ;
   11824 		CSR_WRITE(sc, WMREG_EECD, reg);
   11825 	}
   11826 
   11827 	if (sc->sc_type >= WM_T_ICH8) {
   11828 		wm_put_nvm_ich8lan(sc);
   11829 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11830 		wm_put_swfwhw_semaphore(sc);
   11831 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11832 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11833 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11834 		wm_put_swsm_semaphore(sc);
   11835 }
   11836 
   11837 static int
   11838 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11839 {
   11840 	uint32_t eecd = 0;
   11841 
   11842 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11843 	    || sc->sc_type == WM_T_82583) {
   11844 		eecd = CSR_READ(sc, WMREG_EECD);
   11845 
   11846 		/* Isolate bits 15 & 16 */
   11847 		eecd = ((eecd >> 15) & 0x03);
   11848 
   11849 		/* If both bits are set, device is Flash type */
   11850 		if (eecd == 0x03)
   11851 			return 0;
   11852 	}
   11853 	return 1;
   11854 }
   11855 
   11856 static int
   11857 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11858 {
   11859 	uint32_t eec;
   11860 
   11861 	eec = CSR_READ(sc, WMREG_EEC);
   11862 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11863 		return 1;
   11864 
   11865 	return 0;
   11866 }
   11867 
   11868 /*
   11869  * wm_nvm_validate_checksum
   11870  *
   11871  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11872  */
   11873 static int
   11874 wm_nvm_validate_checksum(struct wm_softc *sc)
   11875 {
   11876 	uint16_t checksum;
   11877 	uint16_t eeprom_data;
   11878 #ifdef WM_DEBUG
   11879 	uint16_t csum_wordaddr, valid_checksum;
   11880 #endif
   11881 	int i;
   11882 
   11883 	checksum = 0;
   11884 
   11885 	/* Don't check for I211 */
   11886 	if (sc->sc_type == WM_T_I211)
   11887 		return 0;
   11888 
   11889 #ifdef WM_DEBUG
   11890 	if (sc->sc_type == WM_T_PCH_LPT) {
   11891 		csum_wordaddr = NVM_OFF_COMPAT;
   11892 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11893 	} else {
   11894 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11895 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11896 	}
   11897 
   11898 	/* Dump EEPROM image for debug */
   11899 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11900 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11901 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11902 		/* XXX PCH_SPT? */
   11903 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11904 		if ((eeprom_data & valid_checksum) == 0) {
   11905 			DPRINTF(WM_DEBUG_NVM,
    11906 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   11907 				device_xname(sc->sc_dev), eeprom_data,
   11908 				    valid_checksum));
   11909 		}
   11910 	}
   11911 
   11912 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11913 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11914 		for (i = 0; i < NVM_SIZE; i++) {
   11915 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11916 				printf("XXXX ");
   11917 			else
   11918 				printf("%04hx ", eeprom_data);
   11919 			if (i % 8 == 7)
   11920 				printf("\n");
   11921 		}
   11922 	}
   11923 
   11924 #endif /* WM_DEBUG */
   11925 
   11926 	for (i = 0; i < NVM_SIZE; i++) {
   11927 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11928 			return 1;
   11929 		checksum += eeprom_data;
   11930 	}
   11931 
   11932 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11933 #ifdef WM_DEBUG
   11934 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11935 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11936 #endif
   11937 	}
   11938 
   11939 	return 0;
   11940 }
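
/*
 * Worked example (comment only): the image chooses its checksum word
 * so that the 16-bit sum of the first NVM_SIZE words equals
 * NVM_CHECKSUM; e.g. if the other words sum to 0x1234 (mod 2^16), the
 * checksum word must be NVM_CHECKSUM - 0x1234 (mod 2^16).
 */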
   11941 
   11942 static void
   11943 wm_nvm_version_invm(struct wm_softc *sc)
   11944 {
   11945 	uint32_t dword;
   11946 
   11947 	/*
    11948 	 * Linux's code to decode the version is very strange, so we don't
    11949 	 * follow that algorithm and just use word 61 as the document
    11950 	 * describes.  Perhaps it's not perfect though...
   11951 	 *
   11952 	 * Example:
   11953 	 *
   11954 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11955 	 */
   11956 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11957 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11958 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11959 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11960 }
   11961 
   11962 static void
   11963 wm_nvm_version(struct wm_softc *sc)
   11964 {
   11965 	uint16_t major, minor, build, patch;
   11966 	uint16_t uid0, uid1;
   11967 	uint16_t nvm_data;
   11968 	uint16_t off;
   11969 	bool check_version = false;
   11970 	bool check_optionrom = false;
   11971 	bool have_build = false;
   11972 
   11973 	/*
   11974 	 * Version format:
   11975 	 *
   11976 	 * XYYZ
   11977 	 * X0YZ
   11978 	 * X0YY
   11979 	 *
   11980 	 * Example:
   11981 	 *
   11982 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11983 	 *	82571	0x50a6	5.10.6?
   11984 	 *	82572	0x506a	5.6.10?
   11985 	 *	82572EI	0x5069	5.6.9?
   11986 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11987 	 *		0x2013	2.1.3?
    11988 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   11989 	 */
   11990 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11991 	switch (sc->sc_type) {
   11992 	case WM_T_82571:
   11993 	case WM_T_82572:
   11994 	case WM_T_82574:
   11995 	case WM_T_82583:
   11996 		check_version = true;
   11997 		check_optionrom = true;
   11998 		have_build = true;
   11999 		break;
   12000 	case WM_T_82575:
   12001 	case WM_T_82576:
   12002 	case WM_T_82580:
   12003 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12004 			check_version = true;
   12005 		break;
   12006 	case WM_T_I211:
   12007 		wm_nvm_version_invm(sc);
   12008 		goto printver;
   12009 	case WM_T_I210:
   12010 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12011 			wm_nvm_version_invm(sc);
   12012 			goto printver;
   12013 		}
   12014 		/* FALLTHROUGH */
   12015 	case WM_T_I350:
   12016 	case WM_T_I354:
   12017 		check_version = true;
   12018 		check_optionrom = true;
   12019 		break;
   12020 	default:
   12021 		return;
   12022 	}
   12023 	if (check_version) {
   12024 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12025 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12026 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12027 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12028 			build = nvm_data & NVM_BUILD_MASK;
   12029 			have_build = true;
   12030 		} else
   12031 			minor = nvm_data & 0x00ff;
   12032 
   12033 		/* Decimal */
   12034 		minor = (minor / 16) * 10 + (minor % 16);
   12035 		sc->sc_nvm_ver_major = major;
   12036 		sc->sc_nvm_ver_minor = minor;
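		/*
		 * With the XYYZ layout sketched above, the 82571 example
		 * word 0x50a2 has minor nibbles 0x0a; treated as BCD this
		 * becomes decimal 10, so the result prints as 5.10.2.
		 */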
   12037 
   12038 printver:
   12039 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12040 		    sc->sc_nvm_ver_minor);
   12041 		if (have_build) {
   12042 			sc->sc_nvm_ver_build = build;
   12043 			aprint_verbose(".%d", build);
   12044 		}
   12045 	}
   12046 	if (check_optionrom) {
   12047 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12048 		/* Option ROM Version */
   12049 		if ((off != 0x0000) && (off != 0xffff)) {
   12050 			off += NVM_COMBO_VER_OFF;
   12051 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12052 			wm_nvm_read(sc, off, 1, &uid0);
   12053 			if ((uid0 != 0) && (uid0 != 0xffff)
   12054 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12055 				/* 16bits */
   12056 				major = uid0 >> 8;
   12057 				build = (uid0 << 8) | (uid1 >> 8);
   12058 				patch = uid1 & 0x00ff;
   12059 				aprint_verbose(", option ROM Version %d.%d.%d",
   12060 				    major, build, patch);
   12061 			}
   12062 		}
   12063 	}
   12064 
   12065 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12066 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12067 }
   12068 
   12069 /*
   12070  * wm_nvm_read:
   12071  *
   12072  *	Read data from the serial EEPROM.
   12073  */
   12074 static int
   12075 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12076 {
   12077 	int rv;
   12078 
   12079 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12080 		device_xname(sc->sc_dev), __func__));
   12081 
   12082 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12083 		return 1;
   12084 
   12085 	if (wm_nvm_acquire(sc))
   12086 		return 1;
   12087 
   12088 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12089 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12090 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12091 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12092 	else if (sc->sc_type == WM_T_PCH_SPT)
   12093 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12094 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12095 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12096 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12097 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12098 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12099 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12100 	else
   12101 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12102 
   12103 	wm_nvm_release(sc);
   12104 	return rv;
   12105 }
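
/*
 * Backend selection above, in order of the tests: ICH8/9/10 and
 * PCH/PCH2/PCH_LPT use the ICH flash interface, PCH_SPT its own flash
 * path, then iNVM (I210/I211), the EERD register, SPI and finally
 * Microwire.
 */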
   12106 
   12107 /*
   12108  * Hardware semaphores.
    12109  * Very complex...
   12110  */
   12111 
   12112 static int
   12113 wm_get_null(struct wm_softc *sc)
   12114 {
   12115 
   12116 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12117 		device_xname(sc->sc_dev), __func__));
   12118 	return 0;
   12119 }
   12120 
   12121 static void
   12122 wm_put_null(struct wm_softc *sc)
   12123 {
   12124 
   12125 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12126 		device_xname(sc->sc_dev), __func__));
   12127 	return;
   12128 }
   12129 
   12130 /*
   12131  * Get hardware semaphore.
   12132  * Same as e1000_get_hw_semaphore_generic()
   12133  */
   12134 static int
   12135 wm_get_swsm_semaphore(struct wm_softc *sc)
   12136 {
   12137 	int32_t timeout;
   12138 	uint32_t swsm;
   12139 
   12140 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12141 		device_xname(sc->sc_dev), __func__));
   12142 	KASSERT(sc->sc_nvm_wordsize > 0);
   12143 
   12144 	/* Get the SW semaphore. */
   12145 	timeout = sc->sc_nvm_wordsize + 1;
   12146 	while (timeout) {
   12147 		swsm = CSR_READ(sc, WMREG_SWSM);
   12148 
   12149 		if ((swsm & SWSM_SMBI) == 0)
   12150 			break;
   12151 
   12152 		delay(50);
   12153 		timeout--;
   12154 	}
   12155 
   12156 	if (timeout == 0) {
   12157 		aprint_error_dev(sc->sc_dev,
   12158 		    "could not acquire SWSM SMBI\n");
   12159 		return 1;
   12160 	}
   12161 
   12162 	/* Get the FW semaphore. */
   12163 	timeout = sc->sc_nvm_wordsize + 1;
   12164 	while (timeout) {
   12165 		swsm = CSR_READ(sc, WMREG_SWSM);
   12166 		swsm |= SWSM_SWESMBI;
   12167 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12168 		/* If we managed to set the bit we got the semaphore. */
   12169 		swsm = CSR_READ(sc, WMREG_SWSM);
   12170 		if (swsm & SWSM_SWESMBI)
   12171 			break;
   12172 
   12173 		delay(50);
   12174 		timeout--;
   12175 	}
   12176 
   12177 	if (timeout == 0) {
   12178 		aprint_error_dev(sc->sc_dev,
   12179 		    "could not acquire SWSM SWESMBI\n");
   12180 		/* Release semaphores */
   12181 		wm_put_swsm_semaphore(sc);
   12182 		return 1;
   12183 	}
   12184 	return 0;
   12185 }
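
/*
 * The function above is the usual two-stage handshake: SMBI arbitrates
 * among software agents, SWESMBI between software and firmware.
 * SWESMBI is acquired by writing the bit and reading it back; if the
 * write did not stick, firmware still owns the semaphore.
 */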
   12186 
   12187 /*
   12188  * Put hardware semaphore.
   12189  * Same as e1000_put_hw_semaphore_generic()
   12190  */
   12191 static void
   12192 wm_put_swsm_semaphore(struct wm_softc *sc)
   12193 {
   12194 	uint32_t swsm;
   12195 
   12196 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12197 		device_xname(sc->sc_dev), __func__));
   12198 
   12199 	swsm = CSR_READ(sc, WMREG_SWSM);
   12200 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12201 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12202 }
   12203 
   12204 /*
   12205  * Get SW/FW semaphore.
   12206  * Same as e1000_acquire_swfw_sync_82575().
   12207  */
   12208 static int
   12209 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12210 {
   12211 	uint32_t swfw_sync;
   12212 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12213 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    12214 	int timeout;
   12215 
   12216 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12217 		device_xname(sc->sc_dev), __func__));
   12218 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12219 
   12220 	for (timeout = 0; timeout < 200; timeout++) {
   12221 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12222 			if (wm_get_swsm_semaphore(sc)) {
   12223 				aprint_error_dev(sc->sc_dev,
   12224 				    "%s: failed to get semaphore\n",
   12225 				    __func__);
   12226 				return 1;
   12227 			}
   12228 		}
   12229 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12230 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12231 			swfw_sync |= swmask;
   12232 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12233 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12234 				wm_put_swsm_semaphore(sc);
   12235 			return 0;
   12236 		}
   12237 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12238 			wm_put_swsm_semaphore(sc);
   12239 		delay(5000);
   12240 	}
   12241 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12242 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12243 	return 1;
   12244 }
   12245 
   12246 static void
   12247 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12248 {
   12249 	uint32_t swfw_sync;
   12250 
   12251 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12252 		device_xname(sc->sc_dev), __func__));
   12253 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12254 
   12255 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12256 		while (wm_get_swsm_semaphore(sc) != 0)
   12257 			continue;
   12258 	}
   12259 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12260 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12261 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12262 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12263 		wm_put_swsm_semaphore(sc);
   12264 }
   12265 
   12266 static int
   12267 wm_get_phy_82575(struct wm_softc *sc)
   12268 {
   12269 
   12270 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12271 		device_xname(sc->sc_dev), __func__));
   12272 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12273 }
   12274 
   12275 static void
   12276 wm_put_phy_82575(struct wm_softc *sc)
   12277 {
   12278 
   12279 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12280 		device_xname(sc->sc_dev), __func__));
   12281 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12282 }
   12283 
   12284 static int
   12285 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12286 {
   12287 	uint32_t ext_ctrl;
    12288 	int timeout;
   12289 
   12290 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12291 		device_xname(sc->sc_dev), __func__));
   12292 
   12293 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12294 	for (timeout = 0; timeout < 200; timeout++) {
   12295 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12296 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12297 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12298 
   12299 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12300 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12301 			return 0;
   12302 		delay(5000);
   12303 	}
   12304 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12305 	    device_xname(sc->sc_dev), ext_ctrl);
   12306 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12307 	return 1;
   12308 }
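
/*
 * Note that on success the ICH PHY/NVM mutex taken above is left held;
 * wm_put_swfwhw_semaphore() below drops both the EXTCNFCTR ownership
 * bit and the mutex, so every successful get must be paired with a put.
 */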
   12309 
   12310 static void
   12311 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12312 {
   12313 	uint32_t ext_ctrl;
   12314 
   12315 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12316 		device_xname(sc->sc_dev), __func__));
   12317 
   12318 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12319 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12320 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12321 
   12322 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12323 }
   12324 
   12325 static int
   12326 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12327 {
   12328 	uint32_t ext_ctrl;
   12329 	int timeout;
   12330 
   12331 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12332 		device_xname(sc->sc_dev), __func__));
   12333 	mutex_enter(sc->sc_ich_phymtx);
   12334 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12335 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12336 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12337 			break;
   12338 		delay(1000);
   12339 	}
   12340 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12341 		printf("%s: SW has already locked the resource\n",
   12342 		    device_xname(sc->sc_dev));
   12343 		goto out;
   12344 	}
   12345 
   12346 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12347 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12348 	for (timeout = 0; timeout < 1000; timeout++) {
   12349 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12350 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12351 			break;
   12352 		delay(1000);
   12353 	}
   12354 	if (timeout >= 1000) {
   12355 		printf("%s: failed to acquire semaphore\n",
   12356 		    device_xname(sc->sc_dev));
   12357 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12358 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12359 		goto out;
   12360 	}
   12361 	return 0;
   12362 
   12363 out:
   12364 	mutex_exit(sc->sc_ich_phymtx);
   12365 	return 1;
   12366 }
   12367 
   12368 static void
   12369 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12370 {
   12371 	uint32_t ext_ctrl;
   12372 
   12373 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12374 		device_xname(sc->sc_dev), __func__));
   12375 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12376 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12377 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12378 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12379 	} else {
   12380 		printf("%s: Semaphore unexpectedly released\n",
   12381 		    device_xname(sc->sc_dev));
   12382 	}
   12383 
   12384 	mutex_exit(sc->sc_ich_phymtx);
   12385 }
   12386 
   12387 static int
   12388 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12389 {
   12390 
   12391 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12392 		device_xname(sc->sc_dev), __func__));
   12393 	mutex_enter(sc->sc_ich_nvmmtx);
   12394 
   12395 	return 0;
   12396 }
   12397 
   12398 static void
   12399 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12400 {
   12401 
   12402 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12403 		device_xname(sc->sc_dev), __func__));
   12404 	mutex_exit(sc->sc_ich_nvmmtx);
   12405 }
   12406 
   12407 static int
   12408 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12409 {
   12410 	int i = 0;
   12411 	uint32_t reg;
   12412 
   12413 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12414 		device_xname(sc->sc_dev), __func__));
   12415 
   12416 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12417 	do {
   12418 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12419 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12420 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12421 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12422 			break;
   12423 		delay(2*1000);
   12424 		i++;
   12425 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12426 
   12427 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12428 		wm_put_hw_semaphore_82573(sc);
   12429 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12430 		    device_xname(sc->sc_dev));
   12431 		return -1;
   12432 	}
   12433 
   12434 	return 0;
   12435 }
   12436 
   12437 static void
   12438 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12439 {
   12440 	uint32_t reg;
   12441 
   12442 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12443 		device_xname(sc->sc_dev), __func__));
   12444 
   12445 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12446 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12447 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12448 }
   12449 
   12450 /*
   12451  * Management mode and power management related subroutines.
   12452  * BMC, AMT, suspend/resume and EEE.
   12453  */
   12454 
   12455 #ifdef WM_WOL
   12456 static int
   12457 wm_check_mng_mode(struct wm_softc *sc)
   12458 {
   12459 	int rv;
   12460 
   12461 	switch (sc->sc_type) {
   12462 	case WM_T_ICH8:
   12463 	case WM_T_ICH9:
   12464 	case WM_T_ICH10:
   12465 	case WM_T_PCH:
   12466 	case WM_T_PCH2:
   12467 	case WM_T_PCH_LPT:
   12468 	case WM_T_PCH_SPT:
   12469 		rv = wm_check_mng_mode_ich8lan(sc);
   12470 		break;
   12471 	case WM_T_82574:
   12472 	case WM_T_82583:
   12473 		rv = wm_check_mng_mode_82574(sc);
   12474 		break;
   12475 	case WM_T_82571:
   12476 	case WM_T_82572:
   12477 	case WM_T_82573:
   12478 	case WM_T_80003:
   12479 		rv = wm_check_mng_mode_generic(sc);
   12480 		break;
   12481 	default:
    12482 		/* nothing to do */
   12483 		rv = 0;
   12484 		break;
   12485 	}
   12486 
   12487 	return rv;
   12488 }
   12489 
   12490 static int
   12491 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12492 {
   12493 	uint32_t fwsm;
   12494 
   12495 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12496 
   12497 	if (((fwsm & FWSM_FW_VALID) != 0)
   12498 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12499 		return 1;
   12500 
   12501 	return 0;
   12502 }
   12503 
   12504 static int
   12505 wm_check_mng_mode_82574(struct wm_softc *sc)
   12506 {
   12507 	uint16_t data;
   12508 
   12509 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12510 
   12511 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12512 		return 1;
   12513 
   12514 	return 0;
   12515 }
   12516 
   12517 static int
   12518 wm_check_mng_mode_generic(struct wm_softc *sc)
   12519 {
   12520 	uint32_t fwsm;
   12521 
   12522 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12523 
   12524 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12525 		return 1;
   12526 
   12527 	return 0;
   12528 }
   12529 #endif /* WM_WOL */
   12530 
   12531 static int
   12532 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12533 {
   12534 	uint32_t manc, fwsm, factps;
   12535 
   12536 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12537 		return 0;
   12538 
   12539 	manc = CSR_READ(sc, WMREG_MANC);
   12540 
   12541 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12542 		device_xname(sc->sc_dev), manc));
   12543 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12544 		return 0;
   12545 
   12546 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12547 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12548 		factps = CSR_READ(sc, WMREG_FACTPS);
   12549 		if (((factps & FACTPS_MNGCG) == 0)
   12550 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12551 			return 1;
    12552 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   12553 		uint16_t data;
   12554 
   12555 		factps = CSR_READ(sc, WMREG_FACTPS);
   12556 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12557 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12558 			device_xname(sc->sc_dev), factps, data));
   12559 		if (((factps & FACTPS_MNGCG) == 0)
   12560 		    && ((data & NVM_CFG2_MNGM_MASK)
   12561 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12562 			return 1;
   12563 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12564 	    && ((manc & MANC_ASF_EN) == 0))
   12565 		return 1;
   12566 
   12567 	return 0;
   12568 }
   12569 
   12570 static bool
   12571 wm_phy_resetisblocked(struct wm_softc *sc)
   12572 {
   12573 	bool blocked = false;
   12574 	uint32_t reg;
   12575 	int i = 0;
   12576 
   12577 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12578 		device_xname(sc->sc_dev), __func__));
   12579 
   12580 	switch (sc->sc_type) {
   12581 	case WM_T_ICH8:
   12582 	case WM_T_ICH9:
   12583 	case WM_T_ICH10:
   12584 	case WM_T_PCH:
   12585 	case WM_T_PCH2:
   12586 	case WM_T_PCH_LPT:
   12587 	case WM_T_PCH_SPT:
   12588 		do {
   12589 			reg = CSR_READ(sc, WMREG_FWSM);
   12590 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12591 				blocked = true;
   12592 				delay(10*1000);
   12593 				continue;
   12594 			}
   12595 			blocked = false;
   12596 		} while (blocked && (i++ < 30));
   12597 		return blocked;
   12599 	case WM_T_82571:
   12600 	case WM_T_82572:
   12601 	case WM_T_82573:
   12602 	case WM_T_82574:
   12603 	case WM_T_82583:
   12604 	case WM_T_80003:
   12605 		reg = CSR_READ(sc, WMREG_MANC);
   12606 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12607 			return true;
   12608 		else
   12609 			return false;
   12611 	default:
   12612 		/* no problem */
   12613 		break;
   12614 	}
   12615 
   12616 	return false;
   12617 }
   12618 
   12619 static void
   12620 wm_get_hw_control(struct wm_softc *sc)
   12621 {
   12622 	uint32_t reg;
   12623 
   12624 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12625 		device_xname(sc->sc_dev), __func__));
   12626 
   12627 	if (sc->sc_type == WM_T_82573) {
   12628 		reg = CSR_READ(sc, WMREG_SWSM);
   12629 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12630 	} else if (sc->sc_type >= WM_T_82571) {
   12631 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12632 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12633 	}
   12634 }
   12635 
   12636 static void
   12637 wm_release_hw_control(struct wm_softc *sc)
   12638 {
   12639 	uint32_t reg;
   12640 
   12641 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12642 		device_xname(sc->sc_dev), __func__));
   12643 
   12644 	if (sc->sc_type == WM_T_82573) {
   12645 		reg = CSR_READ(sc, WMREG_SWSM);
   12646 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12647 	} else if (sc->sc_type >= WM_T_82571) {
   12648 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12649 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12650 	}
   12651 }
   12652 
   12653 static void
   12654 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12655 {
   12656 	uint32_t reg;
   12657 
   12658 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12659 		device_xname(sc->sc_dev), __func__));
   12660 
   12661 	if (sc->sc_type < WM_T_PCH2)
   12662 		return;
   12663 
   12664 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12665 
   12666 	if (gate)
   12667 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12668 	else
   12669 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12670 
   12671 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12672 }
   12673 
   12674 static void
   12675 wm_smbustopci(struct wm_softc *sc)
   12676 {
   12677 	uint32_t fwsm, reg;
   12678 	int rv = 0;
   12679 
   12680 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12681 		device_xname(sc->sc_dev), __func__));
   12682 
   12683 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12684 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12685 
   12686 	/* Disable ULP */
   12687 	wm_ulp_disable(sc);
   12688 
   12689 	/* Acquire PHY semaphore */
   12690 	sc->phy.acquire(sc);
   12691 
   12692 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12693 	switch (sc->sc_type) {
   12694 	case WM_T_PCH_LPT:
   12695 	case WM_T_PCH_SPT:
   12696 		if (wm_phy_is_accessible_pchlan(sc))
   12697 			break;
   12698 
   12699 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12700 		reg |= CTRL_EXT_FORCE_SMBUS;
   12701 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12702 #if 0
   12703 		/* XXX Isn't this required??? */
   12704 		CSR_WRITE_FLUSH(sc);
   12705 #endif
   12706 		delay(50 * 1000);
   12707 		/* FALLTHROUGH */
   12708 	case WM_T_PCH2:
   12709 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12710 			break;
   12711 		/* FALLTHROUGH */
   12712 	case WM_T_PCH:
   12713 		if (sc->sc_type == WM_T_PCH)
   12714 			if ((fwsm & FWSM_FW_VALID) != 0)
   12715 				break;
   12716 
   12717 		if (wm_phy_resetisblocked(sc) == true) {
   12718 			printf("XXX reset is blocked(3)\n");
   12719 			break;
   12720 		}
   12721 
   12722 		wm_toggle_lanphypc_pch_lpt(sc);
   12723 
   12724 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12725 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12726 				break;
   12727 
   12728 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12729 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12730 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12731 
   12732 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12733 				break;
   12734 			rv = -1;
   12735 		}
   12736 		break;
   12737 	default:
   12738 		break;
   12739 	}
   12740 
   12741 	/* Release semaphore */
   12742 	sc->phy.release(sc);
   12743 
   12744 	if (rv == 0) {
   12745 		if (wm_phy_resetisblocked(sc)) {
   12746 			printf("XXX reset is blocked(4)\n");
   12747 			goto out;
   12748 		}
   12749 		wm_reset_phy(sc);
   12750 		if (wm_phy_resetisblocked(sc))
   12751 			printf("XXX reset is blocked(4)\n");
   12752 	}
   12753 
   12754 out:
   12755 	/*
   12756 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12757 	 */
   12758 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12759 		delay(10*1000);
   12760 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12761 	}
   12762 }
   12763 
   12764 static void
   12765 wm_init_manageability(struct wm_softc *sc)
   12766 {
   12767 
   12768 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12769 		device_xname(sc->sc_dev), __func__));
   12770 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12771 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12772 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12773 
   12774 		/* Disable hardware interception of ARP */
   12775 		manc &= ~MANC_ARP_EN;
   12776 
   12777 		/* Enable receiving management packets to the host */
   12778 		if (sc->sc_type >= WM_T_82571) {
   12779 			manc |= MANC_EN_MNG2HOST;
    12780 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12781 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12782 		}
   12783 
   12784 		CSR_WRITE(sc, WMREG_MANC, manc);
   12785 	}
   12786 }
   12787 
   12788 static void
   12789 wm_release_manageability(struct wm_softc *sc)
   12790 {
   12791 
   12792 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12793 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12794 
   12795 		manc |= MANC_ARP_EN;
   12796 		if (sc->sc_type >= WM_T_82571)
   12797 			manc &= ~MANC_EN_MNG2HOST;
   12798 
   12799 		CSR_WRITE(sc, WMREG_MANC, manc);
   12800 	}
   12801 }
   12802 
   12803 static void
   12804 wm_get_wakeup(struct wm_softc *sc)
   12805 {
   12806 
   12807 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12808 	switch (sc->sc_type) {
   12809 	case WM_T_82573:
   12810 	case WM_T_82583:
   12811 		sc->sc_flags |= WM_F_HAS_AMT;
   12812 		/* FALLTHROUGH */
   12813 	case WM_T_80003:
   12814 	case WM_T_82575:
   12815 	case WM_T_82576:
   12816 	case WM_T_82580:
   12817 	case WM_T_I350:
   12818 	case WM_T_I354:
   12819 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12820 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12821 		/* FALLTHROUGH */
   12822 	case WM_T_82541:
   12823 	case WM_T_82541_2:
   12824 	case WM_T_82547:
   12825 	case WM_T_82547_2:
   12826 	case WM_T_82571:
   12827 	case WM_T_82572:
   12828 	case WM_T_82574:
   12829 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12830 		break;
   12831 	case WM_T_ICH8:
   12832 	case WM_T_ICH9:
   12833 	case WM_T_ICH10:
   12834 	case WM_T_PCH:
   12835 	case WM_T_PCH2:
   12836 	case WM_T_PCH_LPT:
   12837 	case WM_T_PCH_SPT:
   12838 		sc->sc_flags |= WM_F_HAS_AMT;
   12839 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12840 		break;
   12841 	default:
   12842 		break;
   12843 	}
   12844 
   12845 	/* 1: HAS_MANAGE */
   12846 	if (wm_enable_mng_pass_thru(sc) != 0)
   12847 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12848 
   12849 #ifdef WM_DEBUG
   12850 	printf("\n");
   12851 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12852 		printf("HAS_AMT,");
   12853 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12854 		printf("ARC_SUBSYS_VALID,");
   12855 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12856 		printf("ASF_FIRMWARE_PRES,");
   12857 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12858 		printf("HAS_MANAGE,");
   12859 	printf("\n");
   12860 #endif
   12861 	/*
    12862 	 * Note that the WOL flags are set after the EEPROM stuff has been
    12863 	 * reset.
   12864 	 */
   12865 }
   12866 
   12867 /*
   12868  * Unconfigure Ultra Low Power mode.
   12869  * Only for I217 and newer (see below).
   12870  */
   12871 static void
   12872 wm_ulp_disable(struct wm_softc *sc)
   12873 {
   12874 	uint32_t reg;
   12875 	int i = 0;
   12876 
   12877 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12878 		device_xname(sc->sc_dev), __func__));
   12879 	/* Exclude old devices */
   12880 	if ((sc->sc_type < WM_T_PCH_LPT)
   12881 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12882 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12883 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   12884 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   12885 		return;
   12886 
   12887 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
    12888 		/* Request that the ME un-configure ULP mode in the PHY */
   12889 		reg = CSR_READ(sc, WMREG_H2ME);
   12890 		reg &= ~H2ME_ULP;
   12891 		reg |= H2ME_ENFORCE_SETTINGS;
   12892 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12893 
   12894 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   12895 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   12896 			if (i++ == 30) {
   12897 				printf("%s timed out\n", __func__);
   12898 				return;
   12899 			}
   12900 			delay(10 * 1000);
   12901 		}
   12902 		reg = CSR_READ(sc, WMREG_H2ME);
   12903 		reg &= ~H2ME_ENFORCE_SETTINGS;
   12904 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12905 
   12906 		return;
   12907 	}
   12908 
   12909 	/* Acquire semaphore */
   12910 	sc->phy.acquire(sc);
   12911 
   12912 	/* Toggle LANPHYPC */
   12913 	wm_toggle_lanphypc_pch_lpt(sc);
   12914 
   12915 	/* Unforce SMBus mode in PHY */
   12916 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12917 	if (reg == 0x0000 || reg == 0xffff) {
   12918 		uint32_t reg2;
   12919 
   12920 		printf("%s: Force SMBus first.\n", __func__);
   12921 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   12922 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   12923 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   12924 		delay(50 * 1000);
   12925 
   12926 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12927 	}
   12928 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12929 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   12930 
   12931 	/* Unforce SMBus mode in MAC */
   12932 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12933 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   12934 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12935 
   12936 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   12937 	reg |= HV_PM_CTRL_K1_ENA;
   12938 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   12939 
   12940 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   12941 	reg &= ~(I218_ULP_CONFIG1_IND
   12942 	    | I218_ULP_CONFIG1_STICKY_ULP
   12943 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   12944 	    | I218_ULP_CONFIG1_WOL_HOST
   12945 	    | I218_ULP_CONFIG1_INBAND_EXIT
   12946 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   12947 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   12948 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   12949 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12950 	reg |= I218_ULP_CONFIG1_START;
   12951 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12952 
   12953 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   12954 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   12955 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   12956 
   12957 	/* Release semaphore */
   12958 	sc->phy.release(sc);
   12959 	wm_gmii_reset(sc);
   12960 	delay(50 * 1000);
   12961 }
   12962 
   12963 /* WOL in the newer chipset interfaces (pchlan) */
   12964 static void
   12965 wm_enable_phy_wakeup(struct wm_softc *sc)
   12966 {
   12967 #if 0
   12968 	uint16_t preg;
   12969 
   12970 	/* Copy MAC RARs to PHY RARs */
   12971 
   12972 	/* Copy MAC MTA to PHY MTA */
   12973 
   12974 	/* Configure PHY Rx Control register */
   12975 
   12976 	/* Enable PHY wakeup in MAC register */
   12977 
   12978 	/* Configure and enable PHY wakeup in PHY registers */
   12979 
   12980 	/* Activate PHY wakeup */
   12981 
   12982 	/* XXX */
   12983 #endif
   12984 }
   12985 
   12986 /* Power down workaround on D3 */
   12987 static void
   12988 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12989 {
   12990 	uint32_t reg;
   12991 	int i;
   12992 
   12993 	for (i = 0; i < 2; i++) {
   12994 		/* Disable link */
   12995 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12996 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12997 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12998 
   12999 		/*
   13000 		 * Call gig speed drop workaround on Gig disable before
   13001 		 * accessing any PHY registers
   13002 		 */
   13003 		if (sc->sc_type == WM_T_ICH8)
   13004 			wm_gig_downshift_workaround_ich8lan(sc);
   13005 
   13006 		/* Write VR power-down enable */
   13007 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13008 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13009 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13010 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13011 
   13012 		/* Read it back and test */
   13013 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13014 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13015 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13016 			break;
   13017 
   13018 		/* Issue PHY reset and repeat at most one more time */
   13019 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13020 	}
   13021 }
   13022 
   13023 static void
   13024 wm_enable_wakeup(struct wm_softc *sc)
   13025 {
   13026 	uint32_t reg, pmreg;
   13027 	pcireg_t pmode;
   13028 
   13029 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13030 		device_xname(sc->sc_dev), __func__));
   13031 
   13032 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13033 		&pmreg, NULL) == 0)
   13034 		return;
   13035 
   13036 	/* Advertise the wakeup capability */
   13037 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13038 	    | CTRL_SWDPIN(3));
   13039 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13040 
   13041 	/* ICH workaround */
   13042 	switch (sc->sc_type) {
   13043 	case WM_T_ICH8:
   13044 	case WM_T_ICH9:
   13045 	case WM_T_ICH10:
   13046 	case WM_T_PCH:
   13047 	case WM_T_PCH2:
   13048 	case WM_T_PCH_LPT:
   13049 	case WM_T_PCH_SPT:
   13050 		/* Disable gig during WOL */
   13051 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13052 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13053 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13054 		if (sc->sc_type == WM_T_PCH)
   13055 			wm_gmii_reset(sc);
   13056 
   13057 		/* Power down workaround */
   13058 		if (sc->sc_phytype == WMPHY_82577) {
   13059 			struct mii_softc *child;
   13060 
   13061 			/* Assume that the PHY is copper */
   13062 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			if ((child != NULL) && (child->mii_mpd_rev <= 2))
				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
				    (768 << 5) | 25, 0x0444); /* page 768, reg 25 */
   13066 		}
   13067 		break;
   13068 	default:
   13069 		break;
   13070 	}
   13071 
   13072 	/* Keep the laser running on fiber adapters */
   13073 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13074 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13075 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13076 		reg |= CTRL_EXT_SWDPIN(3);
   13077 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13078 	}
   13079 
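	/* Always enable wake on magic packet */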
   13080 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
#if 0	/* For wake on multicast packets */
   13082 	reg |= WUFC_MC;
   13083 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13084 #endif
   13085 
   13086 	if (sc->sc_type >= WM_T_PCH)
   13087 		wm_enable_phy_wakeup(sc);
   13088 	else {
   13089 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13090 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13091 	}
   13092 
   13093 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13094 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13095 		|| (sc->sc_type == WM_T_PCH2))
   13096 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13097 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13098 
   13099 	/* Request PME */
   13100 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13101 #if 0
   13102 	/* Disable WOL */
   13103 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13104 #else
   13105 	/* For WOL */
   13106 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13107 #endif
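	/* PME_EN arms PME# assertion; PME_STS is write-1-to-clear */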
   13108 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13109 }
   13110 
/* LPLU (Low Power Link Up) */
   13112 
   13113 static void
   13114 wm_lplu_d0_disable(struct wm_softc *sc)
   13115 {
   13116 	uint32_t reg;
   13117 
   13118 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13119 		device_xname(sc->sc_dev), __func__));
   13120 
   13121 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13122 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13123 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13124 }
   13125 
   13126 static void
   13127 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   13128 {
   13129 	uint32_t reg;
   13130 
   13131 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13132 		device_xname(sc->sc_dev), __func__));
   13133 
   13134 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13135 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
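	/* HV_OEM_BITS_ANEGNOW restarts autonegotiation to apply the change */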
   13136 	reg |= HV_OEM_BITS_ANEGNOW;
   13137 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13138 }
   13139 
/* EEE (Energy Efficient Ethernet) */
   13141 
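/*
 * The IPCNFG bits advertise EEE during autonegotiation; the EEER bits
 * let the MAC enter Low Power Idle on the Tx/Rx paths, with EEER_LPI_FC
 * keeping flow control usable while in LPI (summarized from the i350
 * datasheet; see it for the exact semantics).
 */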
   13142 static void
   13143 wm_set_eee_i350(struct wm_softc *sc)
   13144 {
   13145 	uint32_t ipcnfg, eeer;
   13146 
   13147 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13148 	eeer = CSR_READ(sc, WMREG_EEER);
   13149 
   13150 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13151 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13152 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13153 		    | EEER_LPI_FC);
   13154 	} else {
   13155 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13156 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13157 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13158 		    | EEER_LPI_FC);
   13159 	}
   13160 
   13161 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13162 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13163 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13164 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13165 }
   13166 
/*
 * Workarounds (mainly PHY related).
 * PHY workarounds normally belong in the PHY drivers, but the ones
 * below also touch MAC registers, so they live here.
 */
   13171 
/* Workaround for 82566 Kumeran PCS lock loss */
   13173 static void
   13174 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13175 {
   13176 #if 0
   13177 	int miistatus, active, i;
   13178 	int reg;
   13179 
   13180 	miistatus = sc->sc_mii.mii_media_status;
   13181 
   13182 	/* If the link is not up, do nothing */
   13183 	if ((miistatus & IFM_ACTIVE) == 0)
   13184 		return;
   13185 
   13186 	active = sc->sc_mii.mii_media_active;
   13187 
	/* Nothing to do if the link speed is not 1Gbps */
   13189 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13190 		return;
   13191 
   13192 	for (i = 0; i < 10; i++) {
   13193 		/* read twice */
   13194 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13195 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13196 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13197 			goto out;	/* GOOD! */
   13198 
   13199 		/* Reset the PHY */
   13200 		wm_gmii_reset(sc);
		delay(5 * 1000);
   13202 	}
   13203 
   13204 	/* Disable GigE link negotiation */
   13205 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13206 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13207 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13208 
   13209 	/*
   13210 	 * Call gig speed drop workaround on Gig disable before accessing
   13211 	 * any PHY registers.
   13212 	 */
   13213 	wm_gig_downshift_workaround_ich8lan(sc);
   13214 
   13215 out:
   13216 	return;
   13217 #endif
   13218 }
   13219 
/* Workaround for the "WOL from S5 stops working" issue */
   13221 static void
   13222 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13223 {
   13224 	uint16_t kmrn_reg;
   13225 
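	/*
	 * Setting and then clearing the Kumeran near-end loopback
	 * diagnostic bit is what keeps "WOL from S5" working after a
	 * gigabit downshift (the same sequence as Intel's e1000 driver).
	 */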
   13226 	/* Only for igp3 */
   13227 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13228 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13229 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13230 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13231 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13232 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13233 	}
   13234 }
   13235 
   13236 /*
   13237  * Workaround for pch's PHYs
   13238  * XXX should be moved to new PHY driver?
   13239  */
   13240 static void
   13241 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13242 {
   13243 
   13244 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13245 		device_xname(sc->sc_dev), __func__));
   13246 	KASSERT(sc->sc_type == WM_T_PCH);
   13247 
   13248 	if (sc->sc_phytype == WMPHY_82577)
   13249 		wm_set_mdio_slow_mode_hv(sc);
   13250 
   13251 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13252 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   13254 
   13255 	/* 82578 */
   13256 	if (sc->sc_phytype == WMPHY_82578) {
   13257 		struct mii_softc *child;
   13258 
   13259 		/*
   13260 		 * Return registers to default by doing a soft reset then
   13261 		 * writing 0x3140 to the control register
   13262 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13263 		 */
   13264 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13265 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13266 			PHY_RESET(child);
   13267 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13268 			    0x3140);
   13269 		}
   13270 	}
   13271 
   13272 	/* Select page 0 */
   13273 	sc->phy.acquire(sc);
   13274 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13275 	sc->phy.release(sc);
   13276 
	/*
	 * Configure the K1 Si workaround during PHY reset, assuming there
	 * is link, so that K1 is disabled when the link runs at 1Gbps.
	 */
   13281 	wm_k1_gig_workaround_hv(sc, 1);
   13282 }
   13283 
   13284 static void
   13285 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13286 {
   13287 
   13288 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13289 		device_xname(sc->sc_dev), __func__));
   13290 	KASSERT(sc->sc_type == WM_T_PCH2);
   13291 
   13292 	wm_set_mdio_slow_mode_hv(sc);
   13293 }
   13294 
   13295 static int
   13296 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13297 {
   13298 	int k1_enable = sc->sc_nvm_k1_enabled;
   13299 
   13300 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13301 		device_xname(sc->sc_dev), __func__));
   13302 
   13303 	if (sc->phy.acquire(sc) != 0)
   13304 		return -1;
   13305 
   13306 	if (link) {
   13307 		k1_enable = 0;
   13308 
   13309 		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
   13314 	}
   13315 
   13316 	wm_configure_k1_ich8lan(sc, k1_enable);
   13317 	sc->phy.release(sc);
   13318 
   13319 	return 0;
   13320 }
   13321 
   13322 static void
   13323 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13324 {
   13325 	uint32_t reg;
   13326 
   13327 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13328 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13329 	    reg | HV_KMRN_MDIO_SLOW);
   13330 }
   13331 
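/*
 * K1 is a power-saving state of the Kumeran MAC<->PHY interconnect.
 * While the K1 configuration is changed, the MAC speed is temporarily
 * forced (CTRL_FRCSPD plus CTRL_EXT_SPD_BYPS), presumably so the link
 * speed stays pinned while the new setting takes effect.
 */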
   13332 static void
   13333 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13334 {
   13335 	uint32_t ctrl, ctrl_ext, tmp;
   13336 	uint16_t kmrn_reg;
   13337 
   13338 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13339 
   13340 	if (k1_enable)
   13341 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13342 	else
   13343 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13344 
   13345 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13346 
   13347 	delay(20);
   13348 
   13349 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13350 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13351 
   13352 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13353 	tmp |= CTRL_FRCSPD;
   13354 
   13355 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13356 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13357 	CSR_WRITE_FLUSH(sc);
   13358 	delay(20);
   13359 
   13360 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13361 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13362 	CSR_WRITE_FLUSH(sc);
   13363 	delay(20);
   13364 }
   13365 
/* Special case: the 82575 needs to do manual init ... */
   13367 static void
   13368 wm_reset_init_script_82575(struct wm_softc *sc)
   13369 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * It uses the same setup as mentioned in the FreeBSD driver for
	 * the i82575.
	 */
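	/*
	 * Each wm_82575_write_8bit_ctlr_reg() call below writes one byte
	 * (the last argument) at the given offset (the third argument)
	 * behind the named indirect control register - inferred from the
	 * helper's use here; see its definition for the details.
	 */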
   13374 
   13375 	/* SerDes configuration via SERDESCTRL */
   13376 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13377 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13378 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13379 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13380 
   13381 	/* CCM configuration via CCMCTL register */
   13382 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13383 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13384 
   13385 	/* PCIe lanes configuration */
   13386 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13387 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13388 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13389 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13390 
   13391 	/* PCIe PLL Configuration */
   13392 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13393 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13394 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13395 }
   13396 
   13397 static void
   13398 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13399 {
   13400 	uint32_t reg;
   13401 	uint16_t nvmword;
   13402 	int rv;
   13403 
   13404 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13405 		return;
   13406 
   13407 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13408 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13409 	if (rv != 0) {
   13410 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13411 		    __func__);
   13412 		return;
   13413 	}
   13414 
   13415 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13416 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13417 		reg |= MDICNFG_DEST;
   13418 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13419 		reg |= MDICNFG_COM_MDIO;
   13420 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13421 }
   13422 
   13423 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13424 
   13425 static bool
   13426 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13427 {
   13428 	int i;
   13429 	uint32_t reg;
   13430 	uint16_t id1, id2;
   13431 
   13432 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13433 		device_xname(sc->sc_dev), __func__));
   13434 	id1 = id2 = 0xffff;
   13435 	for (i = 0; i < 2; i++) {
   13436 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13437 		if (MII_INVALIDID(id1))
   13438 			continue;
   13439 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13440 		if (MII_INVALIDID(id2))
   13441 			continue;
   13442 		break;
   13443 	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;
   13447 
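	/*
	 * On PCH and PCH2, fall back to slow MDIO mode and retry the ID
	 * read once.  The unlocked readregs take the semaphore themselves,
	 * so it is dropped and re-taken around them.
	 */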
   13448 	if (sc->sc_type < WM_T_PCH_LPT) {
   13449 		sc->phy.release(sc);
   13450 		wm_set_mdio_slow_mode_hv(sc);
   13451 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13452 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13453 		sc->phy.acquire(sc);
   13454 	}
   13455 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		printf("%s: PHY is not accessible\n", device_xname(sc->sc_dev));
   13457 		return false;
   13458 	}
   13459 out:
   13460 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13461 		/* Only unforce SMBus if ME is not active */
   13462 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13463 			/* Unforce SMBus mode in PHY */
   13464 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13465 			    CV_SMB_CTRL);
   13466 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13467 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13468 			    CV_SMB_CTRL, reg);
   13469 
   13470 			/* Unforce SMBus mode in MAC */
   13471 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13472 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13473 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13474 		}
   13475 	}
   13476 	return true;
   13477 }
   13478 
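/*
 * Toggling LANPHYPC (LAN PHY Power Control) forces a PHY power cycle.
 * On LPT and newer parts CTRL_EXT_LPCD signals completion; older ones
 * simply wait a fixed 50ms.  (Naming per Intel's e1000e driver.)
 */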
   13479 static void
   13480 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13481 {
   13482 	uint32_t reg;
   13483 	int i;
   13484 
   13485 	/* Set PHY Config Counter to 50msec */
   13486 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13487 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13488 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13489 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13490 
   13491 	/* Toggle LANPHYPC */
   13492 	reg = CSR_READ(sc, WMREG_CTRL);
   13493 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13494 	reg &= ~CTRL_LANPHYPC_VALUE;
   13495 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13496 	CSR_WRITE_FLUSH(sc);
   13497 	delay(1000);
   13498 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13499 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13500 	CSR_WRITE_FLUSH(sc);
   13501 
   13502 	if (sc->sc_type < WM_T_PCH_LPT)
   13503 		delay(50 * 1000);
   13504 	else {
   13505 		i = 20;
   13506 
   13507 		do {
   13508 			delay(5 * 1000);
   13509 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13510 		    && i--);
   13511 
   13512 		delay(30 * 1000);
   13513 	}
   13514 }
   13515 
   13516 static int
   13517 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13518 {
   13519 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13520 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13521 	uint32_t rxa;
   13522 	uint16_t scale = 0, lat_enc = 0;
   13523 	int64_t lat_ns, value;
   13524 
   13525 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13526 		device_xname(sc->sc_dev), __func__));
   13527 
   13528 	if (link) {
   13529 		pcireg_t preg;
   13530 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13531 
   13532 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13533 
		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit scale (only 0-5 are valid) multiplied by a 10-bit
		 * value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
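		/*
		 * Worked example (illustrative numbers only): for
		 * lat_ns = 2000000, the loop below divides by 2^5
		 * (rounding up) until the value fits in 10 bits:
		 * 2000000 -> 62500 -> 1954 -> 62, giving scale = 3 and
		 * value = 62, i.e. a tolerance of 62 * 2^15 ns
		 * (about 2.03 ms).
		 */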
   13543 		lat_ns = ((int64_t)rxa * 1024 -
   13544 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13545 		if (lat_ns < 0)
   13546 			lat_ns = 0;
   13547 		else {
   13548 			uint32_t status;
   13549 			uint16_t speed;
   13550 
   13551 			status = CSR_READ(sc, WMREG_STATUS);
   13552 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13553 			case STATUS_SPEED_10:
   13554 				speed = 10;
   13555 				break;
   13556 			case STATUS_SPEED_100:
   13557 				speed = 100;
   13558 				break;
   13559 			case STATUS_SPEED_1000:
   13560 				speed = 1000;
   13561 				break;
   13562 			default:
   13563 				printf("%s: Unknown speed (status = %08x)\n",
   13564 				    device_xname(sc->sc_dev), status);
   13565 				return -1;
   13566 			}
   13567 			lat_ns /= speed;
   13568 		}
   13569 		value = lat_ns;
   13570 
   13571 		while (value > LTRV_VALUE) {
			scale++;
   13573 			value = howmany(value, __BIT(5));
   13574 		}
   13575 		if (scale > LTRV_SCALE_MAX) {
   13576 			printf("%s: Invalid LTR latency scale %d\n",
   13577 			    device_xname(sc->sc_dev), scale);
   13578 			return -1;
   13579 		}
   13580 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13581 
   13582 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13583 		    WM_PCI_LTR_CAP_LPT);
   13584 		max_snoop = preg & 0xffff;
   13585 		max_nosnoop = preg >> 16;
   13586 
   13587 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13588 
		if (lat_enc > max_ltr_enc)
			lat_enc = max_ltr_enc;
   13592 	}
	/* Snoop and No-Snoop latencies are the same */
   13594 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13595 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13596 
   13597 	return 0;
   13598 }
   13599 
   13600 /*
   13601  * I210 Errata 25 and I211 Errata 10
   13602  * Slow System Clock.
   13603  */
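/*
 * If the internal PHY's PLL comes up unconfigured (as seen through the
 * GS40G PLL frequency register), reset the PHY with a modified iNVM
 * autoload word loaded through EEARBC and bounce the device through
 * D3/D0 until the PLL configures, then restore the original word, WUC
 * and MDICNFG.  (Summary of the code below.)
 */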
   13604 static void
   13605 wm_pll_workaround_i210(struct wm_softc *sc)
   13606 {
   13607 	uint32_t mdicnfg, wuc;
   13608 	uint32_t reg;
   13609 	pcireg_t pcireg;
   13610 	uint32_t pmreg;
   13611 	uint16_t nvmword, tmp_nvmword;
   13612 	int phyval;
   13613 	bool wa_done = false;
   13614 	int i;
   13615 
   13616 	/* Save WUC and MDICNFG registers */
   13617 	wuc = CSR_READ(sc, WMREG_WUC);
   13618 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13619 
   13620 	reg = mdicnfg & ~MDICNFG_DEST;
   13621 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13622 
   13623 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13624 		nvmword = INVM_DEFAULT_AL;
   13625 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13626 
   13627 	/* Get Power Management cap offset */
   13628 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13629 		&pmreg, NULL) == 0)
   13630 		return;
   13631 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13632 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13633 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13634 
   13635 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13636 			break; /* OK */
   13637 		}
   13638 
   13639 		wa_done = true;
   13640 		/* Directly reset the internal PHY */
   13641 		reg = CSR_READ(sc, WMREG_CTRL);
   13642 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13643 
   13644 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13645 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13646 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13647 
   13648 		CSR_WRITE(sc, WMREG_WUC, 0);
   13649 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13650 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13651 
   13652 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13653 		    pmreg + PCI_PMCSR);
   13654 		pcireg |= PCI_PMCSR_STATE_D3;
   13655 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13656 		    pmreg + PCI_PMCSR, pcireg);
   13657 		delay(1000);
   13658 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13659 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13660 		    pmreg + PCI_PMCSR, pcireg);
   13661 
   13662 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13663 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13664 
   13665 		/* Restore WUC register */
   13666 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13667 	}
   13668 
   13669 	/* Restore MDICNFG setting */
   13670 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13671 	if (wa_done)
   13672 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13673 }
   13674