      1 /*	$NetBSD: if_wm.c,v 1.466 2017/01/16 00:09:06 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
      79  *	- EEE (Energy-Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.466 2017/01/16 00:09:06 knakahara Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
    157 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
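        /*
         * Illustrative use of DPRINTF() (a sketch, not a quote from the
         * code below): the second argument is a fully parenthesized
         * printf() argument list, e.g.
         *
         *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
         *	    device_xname(sc->sc_dev)));
         *
         * When WM_DEBUG is not defined the macro expands to nothing, so
         * the arguments must be free of side effects.
         */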
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
    167  * The maximum number of interrupts this device driver uses.
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
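        /*
         * Sketch of the ring arithmetic above: WM_NTXDESC() is a power of
         * two, so "& WM_NTXDESC_MASK()" is a cheap modulo.  With 4096
         * descriptors:
         *
         *	WM_NEXTTX(txq, 4094) == 4095
         *	WM_NEXTTX(txq, 4095) == 0	(wraps to the start of the ring)
         *
         * WM_NEXTTXS() applies the same idiom to the software job array.
         */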
    197 
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  We have one Rx buffer for normal
    204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    205  * packet.  We allocate 256 receive descriptors, each with a 2k
    206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
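        /*
         * The arithmetic behind the comment above, assuming a 9018 byte
         * jumbo frame: howmany(9018, MCLBYTES) == howmany(9018, 2048) == 5
         * buffers per packet, and WM_NRXDESC / 5 == 51, i.e. roughly 50
         * jumbo packets can be in the ring at once.
         */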
    212 
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 typedef union rxdescs {
    219 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    220 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    221 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    222 } rxdescs_t;
    223 
    224 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    225 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    226 
    227 /*
    228  * Software state for transmit jobs.
    229  */
    230 struct wm_txsoft {
    231 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    232 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    233 	int txs_firstdesc;		/* first descriptor in packet */
    234 	int txs_lastdesc;		/* last descriptor in packet */
    235 	int txs_ndesc;			/* # of descriptors used */
    236 };
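        /*
         * Illustrative lifecycle (assumed from the fields above): when a
         * packet is queued, its mbuf chain is loaded into txs_dmamap and
         * the range of hardware descriptors it occupies is recorded:
         *
         *	txs->txs_firstdesc = first ring index used;
         *	txs->txs_lastdesc  = last ring index used;
         *	txs->txs_ndesc     = number of descriptors consumed;
         *
         * Tx completion can then free the whole job once the hardware has
         * processed txs_lastdesc.
         */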
    237 
    238 /*
    239  * Software state for receive buffers.  Each descriptor gets a
    240  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    241  * more than one buffer, we chain them together.
    242  */
    243 struct wm_rxsoft {
    244 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    245 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    246 };
    247 
    248 #define WM_LINKUP_TIMEOUT	50
    249 
    250 static uint16_t swfwphysem[] = {
    251 	SWFW_PHY0_SM,
    252 	SWFW_PHY1_SM,
    253 	SWFW_PHY2_SM,
    254 	SWFW_PHY3_SM
    255 };
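        /*
         * Illustrative sketch (not a verbatim quote from the code below):
         * swfwphysem[] is indexed by the chip function number, so a PHY
         * access path might pick and take its semaphore as
         *
         *	sem = swfwphysem[sc->sc_funcid];
         *	if (wm_get_swfw_semaphore(sc, sem))
         *		... bail out, the semaphore is busy ...
         */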
    256 
    257 static const uint32_t wm_82580_rxpbs_table[] = {
    258 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    259 };
    260 
    261 struct wm_softc;
    262 
    263 #ifdef WM_EVENT_COUNTERS
    264 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    265 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    266 	struct evcnt qname##_ev_##evname;
    267 
    268 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    269 	do {								\
    270 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    271 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    272 		    "%s%02d%s", #qname, (qnum), #evname);		\
    273 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    274 		    (evtype), NULL, (xname),				\
    275 		    (q)->qname##_##evname##_evcnt_name);		\
    276 	} while (/*CONSTCOND*/0)
    277 
    278 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    279 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    280 
    281 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    282 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    283 #endif /* WM_EVENT_COUNTERS */
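        /*
         * Sketch of what the macros above generate: WM_Q_EVCNT_DEFINE(txq,
         * txdw) declares a name buffer "txq_txdw_evcnt_name[]" and a
         * counter "struct evcnt txq_ev_txdw"; WM_Q_EVCNT_ATTACH() then
         * formats a per-queue name such as "txq01txdw" into the buffer and
         * hands it to evcnt_attach_dynamic().  The
         * sizeof("qname##XX##evname") in the definition is only a size
         * estimate: "##" does not paste inside a string literal.
         */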
    284 
    285 struct wm_txqueue {
    286 	kmutex_t *txq_lock;		/* lock for tx operations */
    287 
    288 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    289 
    290 	/* Software state for the transmit descriptors. */
    291 	int txq_num;			/* must be a power of two */
    292 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    293 
    294 	/* TX control data structures. */
    295 	int txq_ndesc;			/* must be a power of two */
    296 	size_t txq_descsize;		/* size of a Tx descriptor */
    297 	txdescs_t *txq_descs_u;
    298 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    299 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    300 	int txq_desc_rseg;		/* real number of control segments */
    301 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    302 #define	txq_descs	txq_descs_u->sctxu_txdescs
    303 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    304 
    305 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    306 
    307 	int txq_free;			/* number of free Tx descriptors */
    308 	int txq_next;			/* next ready Tx descriptor */
    309 
    310 	int txq_sfree;			/* number of free Tx jobs */
    311 	int txq_snext;			/* next free Tx job */
    312 	int txq_sdirty;			/* dirty Tx jobs */
    313 
    314 	/* These 4 variables are used only on the 82547. */
    315 	int txq_fifo_size;		/* Tx FIFO size */
    316 	int txq_fifo_head;		/* current head of FIFO */
    317 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    318 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    319 
    320 	/*
    321 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    322 	 * CPUs. This queue mediates between them without blocking.
    323 	 */
    324 	pcq_t *txq_interq;
    325 
    326 	/*
    327 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    328 	 * to manage the Tx H/W queue's busy flag.
    329 	 */
    330 	int txq_flags;			/* flags for H/W queue, see below */
    331 #define	WM_TXQ_NO_SPACE	0x1
    332 
    333 	bool txq_stopping;
    334 
    335 #ifdef WM_EVENT_COUNTERS
    336 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    337 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    338 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    339 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    340 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    341 						/* XXX not used? */
    342 
    343 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    344 	WM_Q_EVCNT_DEFINE(txq, txtusum)	/* TCP/UDP cksums comp. out-bound */
    345 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    346 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    347 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    348 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    349 
    350 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */
    351 
    352 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    353 
    354 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    355 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    356 #endif /* WM_EVENT_COUNTERS */
    357 };
    358 
    359 struct wm_rxqueue {
    360 	kmutex_t *rxq_lock;		/* lock for rx operations */
    361 
    362 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    363 
    364 	/* Software state for the receive descriptors. */
    365 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    366 
    367 	/* RX control data structures. */
    368 	int rxq_ndesc;			/* must be a power of two */
    369 	size_t rxq_descsize;		/* size of an Rx descriptor */
    370 	rxdescs_t *rxq_descs_u;
    371 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    372 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    373 	int rxq_desc_rseg;		/* real number of control segments */
    374 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    375 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    376 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    377 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    378 
    379 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    380 
    381 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    382 	int rxq_discard;
    383 	int rxq_len;
    384 	struct mbuf *rxq_head;
    385 	struct mbuf *rxq_tail;
    386 	struct mbuf **rxq_tailp;
    387 
    388 	bool rxq_stopping;
    389 
    390 #ifdef WM_EVENT_COUNTERS
    391 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    392 
    393 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    394 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    395 #endif
    396 };
    397 
    398 struct wm_queue {
    399 	int wmq_id;			/* index of transmit and receive queues */
    400 	int wmq_intr_idx;		/* index into the MSI-X table */
    401 
    402 	struct wm_txqueue wmq_txq;
    403 	struct wm_rxqueue wmq_rxq;
    404 };
    405 
    406 struct wm_phyop {
    407 	int (*acquire)(struct wm_softc *);
    408 	void (*release)(struct wm_softc *);
    409 	int reset_delay_us;
    410 };
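        /*
         * Minimal usage sketch (assumed from the ops above): the chip
         * specific acquire/release hooks bracket every PHY access,
         *
         *	if (sc->phy.acquire(sc) != 0)
         *		return;		could not lock the PHY
         *	... access PHY registers ...
         *	sc->phy.release(sc);
         *
         * and reset_delay_us is the settle time applied after a PHY reset.
         */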
    411 
    412 /*
    413  * Software state per device.
    414  */
    415 struct wm_softc {
    416 	device_t sc_dev;		/* generic device information */
    417 	bus_space_tag_t sc_st;		/* bus space tag */
    418 	bus_space_handle_t sc_sh;	/* bus space handle */
    419 	bus_size_t sc_ss;		/* bus space size */
    420 	bus_space_tag_t sc_iot;		/* I/O space tag */
    421 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    422 	bus_size_t sc_ios;		/* I/O space size */
    423 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    424 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    425 	bus_size_t sc_flashs;		/* flash registers space size */
    426 	off_t sc_flashreg_offset;	/*
    427 					 * offset to flash registers from
    428 					 * start of BAR
    429 					 */
    430 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    431 
    432 	struct ethercom sc_ethercom;	/* ethernet common data */
    433 	struct mii_data sc_mii;		/* MII/media information */
    434 
    435 	pci_chipset_tag_t sc_pc;
    436 	pcitag_t sc_pcitag;
    437 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    438 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    439 
    440 	uint16_t sc_pcidevid;		/* PCI device ID */
    441 	wm_chip_type sc_type;		/* MAC type */
    442 	int sc_rev;			/* MAC revision */
    443 	wm_phy_type sc_phytype;		/* PHY type */
    444 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
    445 #define	WM_MEDIATYPE_UNKNOWN		0x00
    446 #define	WM_MEDIATYPE_FIBER		0x01
    447 #define	WM_MEDIATYPE_COPPER		0x02
    448 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    449 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    450 	int sc_flags;			/* flags; see below */
    451 	int sc_if_flags;		/* last if_flags */
    452 	int sc_flowflags;		/* 802.3x flow control flags */
    453 	int sc_align_tweak;
    454 
    455 	void *sc_ihs[WM_MAX_NINTR];	/*
    456 					 * Interrupt cookies;
    457 					 * legacy and MSI use sc_ihs[0].
    458 					 */
    459 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    460 	int sc_nintrs;			/* number of interrupts */
    461 
    462 	int sc_link_intr_idx;		/* index into the MSI-X table */
    463 
    464 	callout_t sc_tick_ch;		/* tick callout */
    465 	bool sc_core_stopping;
    466 
    467 	int sc_nvm_ver_major;
    468 	int sc_nvm_ver_minor;
    469 	int sc_nvm_ver_build;
    470 	int sc_nvm_addrbits;		/* NVM address bits */
    471 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    472 	int sc_ich8_flash_base;
    473 	int sc_ich8_flash_bank_size;
    474 	int sc_nvm_k1_enabled;
    475 
    476 	int sc_nqueues;
    477 	struct wm_queue *sc_queue;
    478 
    479 	int sc_affinity_offset;
    480 
    481 #ifdef WM_EVENT_COUNTERS
    482 	/* Event counters. */
    483 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    484 
    485 	/* WM_T_82542_2_1 only */
    486 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    487 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    488 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    489 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    490 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    491 #endif /* WM_EVENT_COUNTERS */
    492 
    493 	/* This variable is used only on the 82547. */
    494 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    495 
    496 	uint32_t sc_ctrl;		/* prototype CTRL register */
    497 #if 0
    498 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    499 #endif
    500 	uint32_t sc_icr;		/* prototype interrupt bits */
    501 	uint32_t sc_itr;		/* prototype intr throttling reg */
    502 	uint32_t sc_tctl;		/* prototype TCTL register */
    503 	uint32_t sc_rctl;		/* prototype RCTL register */
    504 	uint32_t sc_txcw;		/* prototype TXCW register */
    505 	uint32_t sc_tipg;		/* prototype TIPG register */
    506 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    507 	uint32_t sc_pba;		/* prototype PBA register */
    508 
    509 	int sc_tbi_linkup;		/* TBI link status */
    510 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    511 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    512 
    513 	int sc_mchash_type;		/* multicast filter offset */
    514 
    515 	krndsource_t rnd_source;	/* random source */
    516 
    517 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    518 
    519 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    520 	kmutex_t *sc_ich_phymtx;	/*
    521 					 * 82574/82583/ICH/PCH specific PHY
    522 					 * mutex. For 82574/82583, the mutex
    523 					 * is used for both PHY and NVM.
    524 					 */
    525 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    526 
    527 	struct wm_phyop phy;
    528 };
    529 
    530 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    531 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    532 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
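        /*
         * Typical (illustrative) use of the helpers above:
         *
         *	WM_CORE_LOCK(sc);
         *	... modify shared softc state ...
         *	KASSERT(WM_CORE_LOCKED(sc));
         *	WM_CORE_UNLOCK(sc);
         *
         * When sc_core_lock is NULL (non-MPSAFE configuration) the lock
         * and unlock steps vanish and WM_CORE_LOCKED() is always true.
         */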
    533 
    534 #ifdef WM_MPSAFE
    535 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    536 #else
    537 #define CALLOUT_FLAGS	0
    538 #endif
    539 
    540 #define	WM_RXCHAIN_RESET(rxq)						\
    541 do {									\
    542 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    543 	*(rxq)->rxq_tailp = NULL;					\
    544 	(rxq)->rxq_len = 0;						\
    545 } while (/*CONSTCOND*/0)
    546 
    547 #define	WM_RXCHAIN_LINK(rxq, m)						\
    548 do {									\
    549 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    550 	(rxq)->rxq_tailp = &(m)->m_next;				\
    551 } while (/*CONSTCOND*/0)
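        /*
         * The macros above are the classic "tail pointer" idiom for O(1)
         * appends: rxq_tailp always points at the m_next slot that the
         * next mbuf should be stored into.  Illustrative trace, linking
         * mbufs a and b onto an empty chain:
         *
         *	WM_RXCHAIN_RESET(rxq);		rxq_head = NULL, tailp = &rxq_head
         *	WM_RXCHAIN_LINK(rxq, a);	rxq_head = a, tailp = &a->m_next
         *	WM_RXCHAIN_LINK(rxq, b);	a->m_next = b, tailp = &b->m_next
         */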
    552 
    553 #ifdef WM_EVENT_COUNTERS
    554 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    555 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    556 
    557 #define WM_Q_EVCNT_INCR(qname, evname)			\
    558 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    559 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    560 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    561 #else /* !WM_EVENT_COUNTERS */
    562 #define	WM_EVCNT_INCR(ev)	/* nothing */
    563 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    564 
    565 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    566 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    567 #endif /* !WM_EVENT_COUNTERS */
    568 
    569 #define	CSR_READ(sc, reg)						\
    570 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    571 #define	CSR_WRITE(sc, reg, val)						\
    572 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    573 #define	CSR_WRITE_FLUSH(sc)						\
    574 	(void) CSR_READ((sc), WMREG_STATUS)
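        /*
         * Illustrative pattern for CSR_WRITE_FLUSH(): PCI writes are
         * posted, so a dummy read of STATUS forces the preceding write
         * out to the chip before a subsequent delay, e.g.
         *
         *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SOME_BIT);
         *	CSR_WRITE_FLUSH(sc);
         *	delay(10);
         *
         * (CTRL_SOME_BIT is a placeholder, not a real register bit.)
         */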
    575 
    576 #define ICH8_FLASH_READ32(sc, reg)					\
    577 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    578 	    (reg) + sc->sc_flashreg_offset)
    579 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    580 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    581 	    (reg) + sc->sc_flashreg_offset, (data))
    582 
    583 #define ICH8_FLASH_READ16(sc, reg)					\
    584 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    585 	    (reg) + sc->sc_flashreg_offset)
    586 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    587 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    588 	    (reg) + sc->sc_flashreg_offset, (data))
    589 
    590 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    591 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    592 
    593 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    594 #define	WM_CDTXADDR_HI(txq, x)						\
    595 	(sizeof(bus_addr_t) == 8 ?					\
    596 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    597 
    598 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    599 #define	WM_CDRXADDR_HI(rxq, x)						\
    600 	(sizeof(bus_addr_t) == 8 ?					\
    601 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
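        /*
         * Worked example (illustrative): with a 64-bit bus_addr_t, a ring
         * based at DMA address 0x123456000 and a 16 byte descriptor size,
         * descriptor 2 sits at 0x123456020, so
         *
         *	WM_CDTXADDR_LO(txq, 2) == 0x23456020
         *	WM_CDTXADDR_HI(txq, 2) == 0x1
         *
         * On platforms with a 32-bit bus_addr_t the high half is always 0.
         */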
    602 
    603 /*
    604  * Register read/write functions.
    605  * Other than CSR_{READ|WRITE}().
    606  */
    607 #if 0
    608 static inline uint32_t wm_io_read(struct wm_softc *, int);
    609 #endif
    610 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    611 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    612 	uint32_t, uint32_t);
    613 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    614 
    615 /*
    616  * Descriptor sync/init functions.
    617  */
    618 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    619 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    620 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    621 
    622 /*
    623  * Device driver interface functions and commonly used functions.
    624  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    625  */
    626 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    627 static int	wm_match(device_t, cfdata_t, void *);
    628 static void	wm_attach(device_t, device_t, void *);
    629 static int	wm_detach(device_t, int);
    630 static bool	wm_suspend(device_t, const pmf_qual_t *);
    631 static bool	wm_resume(device_t, const pmf_qual_t *);
    632 static void	wm_watchdog(struct ifnet *);
    633 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    634 static void	wm_tick(void *);
    635 static int	wm_ifflags_cb(struct ethercom *);
    636 static int	wm_ioctl(struct ifnet *, u_long, void *);
    637 /* MAC address related */
    638 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    639 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    640 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    641 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    642 static void	wm_set_filter(struct wm_softc *);
    643 /* Reset and init related */
    644 static void	wm_set_vlan(struct wm_softc *);
    645 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    646 static void	wm_get_auto_rd_done(struct wm_softc *);
    647 static void	wm_lan_init_done(struct wm_softc *);
    648 static void	wm_get_cfg_done(struct wm_softc *);
    649 static void	wm_initialize_hardware_bits(struct wm_softc *);
    650 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    651 static void	wm_reset_phy(struct wm_softc *);
    652 static void	wm_flush_desc_rings(struct wm_softc *);
    653 static void	wm_reset(struct wm_softc *);
    654 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    655 static void	wm_rxdrain(struct wm_rxqueue *);
    656 static void	wm_rss_getkey(uint8_t *);
    657 static void	wm_init_rss(struct wm_softc *);
    658 static void	wm_adjust_qnum(struct wm_softc *, int);
    659 static int	wm_setup_legacy(struct wm_softc *);
    660 static int	wm_setup_msix(struct wm_softc *);
    661 static int	wm_init(struct ifnet *);
    662 static int	wm_init_locked(struct ifnet *);
    663 static void	wm_turnon(struct wm_softc *);
    664 static void	wm_turnoff(struct wm_softc *);
    665 static void	wm_stop(struct ifnet *, int);
    666 static void	wm_stop_locked(struct ifnet *, int);
    667 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    668 static void	wm_82547_txfifo_stall(void *);
    669 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    670 /* DMA related */
    671 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    672 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    673 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    674 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    675     struct wm_txqueue *);
    676 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    677 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    678 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    679     struct wm_rxqueue *);
    680 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    681 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    682 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    683 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    684 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    685 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    686 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    687     struct wm_txqueue *);
    688 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    689     struct wm_rxqueue *);
    690 static int	wm_alloc_txrx_queues(struct wm_softc *);
    691 static void	wm_free_txrx_queues(struct wm_softc *);
    692 static int	wm_init_txrx_queues(struct wm_softc *);
    693 /* Start */
    694 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    695     uint32_t *, uint8_t *);
    696 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    697 static void	wm_start(struct ifnet *);
    698 static void	wm_start_locked(struct ifnet *);
    699 static int	wm_transmit(struct ifnet *, struct mbuf *);
    700 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    701 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    702 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    703     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    704 static void	wm_nq_start(struct ifnet *);
    705 static void	wm_nq_start_locked(struct ifnet *);
    706 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    707 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    708 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    709 static void	wm_deferred_start(struct ifnet *);
    710 /* Interrupt */
    711 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    712 static void	wm_rxeof(struct wm_rxqueue *);
    713 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    714 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    715 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    716 static void	wm_linkintr(struct wm_softc *, uint32_t);
    717 static int	wm_intr_legacy(void *);
    718 static int	wm_txrxintr_msix(void *);
    719 static int	wm_linkintr_msix(void *);
    720 
    721 /*
    722  * Media related.
    723  * GMII, SGMII, TBI, SERDES and SFP.
    724  */
    725 /* Common */
    726 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    727 /* GMII related */
    728 static void	wm_gmii_reset(struct wm_softc *);
    729 static int	wm_get_phy_id_82575(struct wm_softc *);
    730 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    731 static int	wm_gmii_mediachange(struct ifnet *);
    732 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    733 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    734 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    735 static int	wm_gmii_i82543_readreg(device_t, int, int);
    736 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    737 static int	wm_gmii_mdic_readreg(device_t, int, int);
    738 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    739 static int	wm_gmii_i82544_readreg(device_t, int, int);
    740 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    741 static int	wm_gmii_i80003_readreg(device_t, int, int);
    742 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    743 static int	wm_gmii_bm_readreg(device_t, int, int);
    744 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    745 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    746 static int	wm_gmii_hv_readreg(device_t, int, int);
    747 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    748 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    749 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    750 static int	wm_gmii_82580_readreg(device_t, int, int);
    751 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    752 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    753 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    754 static void	wm_gmii_statchg(struct ifnet *);
    755 /*
    756  * Kumeran related (80003, ICH* and PCH*).
    757  * These functions are not for accessing MII registers but for accessing
    758  * Kumeran-specific registers.
    759  */
    760 static int	wm_kmrn_readreg(struct wm_softc *, int);
    761 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    762 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    763 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    764 /* SGMII */
    765 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    766 static int	wm_sgmii_readreg(device_t, int, int);
    767 static void	wm_sgmii_writereg(device_t, int, int, int);
    768 /* TBI related */
    769 static void	wm_tbi_mediainit(struct wm_softc *);
    770 static int	wm_tbi_mediachange(struct ifnet *);
    771 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    772 static int	wm_check_for_link(struct wm_softc *);
    773 static void	wm_tbi_tick(struct wm_softc *);
    774 /* SERDES related */
    775 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    776 static int	wm_serdes_mediachange(struct ifnet *);
    777 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    778 static void	wm_serdes_tick(struct wm_softc *);
    779 /* SFP related */
    780 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    781 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    782 
    783 /*
    784  * NVM related.
    785  * Microwire, SPI (w/wo EERD) and Flash.
    786  */
    787 /* Misc functions */
    788 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    789 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    790 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    791 /* Microwire */
    792 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    793 /* SPI */
    794 static int	wm_nvm_ready_spi(struct wm_softc *);
    795 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    796 /* Used with EERD */
    797 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    798 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    799 /* Flash */
    800 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    801     unsigned int *);
    802 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    803 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    804 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    805 	uint32_t *);
    806 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    807 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    808 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    809 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    810 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    811 /* iNVM */
    812 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    813 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    814 /* Lock, detect NVM type, validate checksum and read */
    815 static int	wm_nvm_acquire(struct wm_softc *);
    816 static void	wm_nvm_release(struct wm_softc *);
    817 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    818 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    819 static int	wm_nvm_validate_checksum(struct wm_softc *);
    820 static void	wm_nvm_version_invm(struct wm_softc *);
    821 static void	wm_nvm_version(struct wm_softc *);
    822 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    823 
    824 /*
    825  * Hardware semaphores.
    826  * Very complex...
    827  */
    828 static int	wm_get_null(struct wm_softc *);
    829 static void	wm_put_null(struct wm_softc *);
    830 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    831 static void	wm_put_swsm_semaphore(struct wm_softc *);
    832 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    833 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    834 static int	wm_get_phy_82575(struct wm_softc *);
    835 static void	wm_put_phy_82575(struct wm_softc *);
    836 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    837 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    838 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    839 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    840 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    841 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    842 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    843 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    844 
    845 /*
    846  * Management mode and power management related subroutines.
    847  * BMC, AMT, suspend/resume and EEE.
    848  */
    849 #if 0
    850 static int	wm_check_mng_mode(struct wm_softc *);
    851 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    852 static int	wm_check_mng_mode_82574(struct wm_softc *);
    853 static int	wm_check_mng_mode_generic(struct wm_softc *);
    854 #endif
    855 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    856 static bool	wm_phy_resetisblocked(struct wm_softc *);
    857 static void	wm_get_hw_control(struct wm_softc *);
    858 static void	wm_release_hw_control(struct wm_softc *);
    859 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    860 static void	wm_smbustopci(struct wm_softc *);
    861 static void	wm_init_manageability(struct wm_softc *);
    862 static void	wm_release_manageability(struct wm_softc *);
    863 static void	wm_get_wakeup(struct wm_softc *);
    864 static void	wm_ulp_disable(struct wm_softc *);
    865 static void	wm_enable_phy_wakeup(struct wm_softc *);
    866 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    867 static void	wm_enable_wakeup(struct wm_softc *);
    868 /* LPLU (Low Power Link Up) */
    869 static void	wm_lplu_d0_disable(struct wm_softc *);
    870 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    871 /* EEE */
    872 static void	wm_set_eee_i350(struct wm_softc *);
    873 
    874 /*
    875  * Workarounds (mainly PHY related).
    876  * Basically, PHY's workarounds are in the PHY drivers.
    877  */
    878 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    879 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    880 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    881 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    882 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    883 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    884 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    885 static void	wm_reset_init_script_82575(struct wm_softc *);
    886 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    887 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    888 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    889 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    890 static void	wm_pll_workaround_i210(struct wm_softc *);
    891 
    892 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    893     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    894 
    895 /*
    896  * Devices supported by this driver.
    897  */
    898 static const struct wm_product {
    899 	pci_vendor_id_t		wmp_vendor;
    900 	pci_product_id_t	wmp_product;
    901 	const char		*wmp_name;
    902 	wm_chip_type		wmp_type;
    903 	uint32_t		wmp_flags;
    904 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    905 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    906 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    907 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    908 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    909 } wm_products[] = {
    910 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    911 	  "Intel i82542 1000BASE-X Ethernet",
    912 	  WM_T_82542_2_1,	WMP_F_FIBER },
    913 
    914 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    915 	  "Intel i82543GC 1000BASE-X Ethernet",
    916 	  WM_T_82543,		WMP_F_FIBER },
    917 
    918 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    919 	  "Intel i82543GC 1000BASE-T Ethernet",
    920 	  WM_T_82543,		WMP_F_COPPER },
    921 
    922 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    923 	  "Intel i82544EI 1000BASE-T Ethernet",
    924 	  WM_T_82544,		WMP_F_COPPER },
    925 
    926 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    927 	  "Intel i82544EI 1000BASE-X Ethernet",
    928 	  WM_T_82544,		WMP_F_FIBER },
    929 
    930 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    931 	  "Intel i82544GC 1000BASE-T Ethernet",
    932 	  WM_T_82544,		WMP_F_COPPER },
    933 
    934 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    935 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    936 	  WM_T_82544,		WMP_F_COPPER },
    937 
    938 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    939 	  "Intel i82540EM 1000BASE-T Ethernet",
    940 	  WM_T_82540,		WMP_F_COPPER },
    941 
    942 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    943 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    944 	  WM_T_82540,		WMP_F_COPPER },
    945 
    946 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    947 	  "Intel i82540EP 1000BASE-T Ethernet",
    948 	  WM_T_82540,		WMP_F_COPPER },
    949 
    950 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    951 	  "Intel i82540EP 1000BASE-T Ethernet",
    952 	  WM_T_82540,		WMP_F_COPPER },
    953 
    954 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    955 	  "Intel i82540EP 1000BASE-T Ethernet",
    956 	  WM_T_82540,		WMP_F_COPPER },
    957 
    958 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    959 	  "Intel i82545EM 1000BASE-T Ethernet",
    960 	  WM_T_82545,		WMP_F_COPPER },
    961 
    962 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    963 	  "Intel i82545GM 1000BASE-T Ethernet",
    964 	  WM_T_82545_3,		WMP_F_COPPER },
    965 
    966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    967 	  "Intel i82545GM 1000BASE-X Ethernet",
    968 	  WM_T_82545_3,		WMP_F_FIBER },
    969 
    970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    971 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    972 	  WM_T_82545_3,		WMP_F_SERDES },
    973 
    974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    975 	  "Intel i82546EB 1000BASE-T Ethernet",
    976 	  WM_T_82546,		WMP_F_COPPER },
    977 
    978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    979 	  "Intel i82546EB 1000BASE-T Ethernet",
    980 	  WM_T_82546,		WMP_F_COPPER },
    981 
    982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    983 	  "Intel i82545EM 1000BASE-X Ethernet",
    984 	  WM_T_82545,		WMP_F_FIBER },
    985 
    986 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    987 	  "Intel i82546EB 1000BASE-X Ethernet",
    988 	  WM_T_82546,		WMP_F_FIBER },
    989 
    990 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    991 	  "Intel i82546GB 1000BASE-T Ethernet",
    992 	  WM_T_82546_3,		WMP_F_COPPER },
    993 
    994 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    995 	  "Intel i82546GB 1000BASE-X Ethernet",
    996 	  WM_T_82546_3,		WMP_F_FIBER },
    997 
    998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    999 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1000 	  WM_T_82546_3,		WMP_F_SERDES },
   1001 
   1002 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1003 	  "i82546GB quad-port Gigabit Ethernet",
   1004 	  WM_T_82546_3,		WMP_F_COPPER },
   1005 
   1006 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1007 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1008 	  WM_T_82546_3,		WMP_F_COPPER },
   1009 
   1010 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1011 	  "Intel PRO/1000MT (82546GB)",
   1012 	  WM_T_82546_3,		WMP_F_COPPER },
   1013 
   1014 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1015 	  "Intel i82541EI 1000BASE-T Ethernet",
   1016 	  WM_T_82541,		WMP_F_COPPER },
   1017 
   1018 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1019 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1020 	  WM_T_82541,		WMP_F_COPPER },
   1021 
   1022 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1023 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1024 	  WM_T_82541,		WMP_F_COPPER },
   1025 
   1026 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1027 	  "Intel i82541ER 1000BASE-T Ethernet",
   1028 	  WM_T_82541_2,		WMP_F_COPPER },
   1029 
   1030 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1031 	  "Intel i82541GI 1000BASE-T Ethernet",
   1032 	  WM_T_82541_2,		WMP_F_COPPER },
   1033 
   1034 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1035 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1036 	  WM_T_82541_2,		WMP_F_COPPER },
   1037 
   1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1039 	  "Intel i82541PI 1000BASE-T Ethernet",
   1040 	  WM_T_82541_2,		WMP_F_COPPER },
   1041 
   1042 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1043 	  "Intel i82547EI 1000BASE-T Ethernet",
   1044 	  WM_T_82547,		WMP_F_COPPER },
   1045 
   1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1047 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1048 	  WM_T_82547,		WMP_F_COPPER },
   1049 
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1051 	  "Intel i82547GI 1000BASE-T Ethernet",
   1052 	  WM_T_82547_2,		WMP_F_COPPER },
   1053 
   1054 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1055 	  "Intel PRO/1000 PT (82571EB)",
   1056 	  WM_T_82571,		WMP_F_COPPER },
   1057 
   1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1059 	  "Intel PRO/1000 PF (82571EB)",
   1060 	  WM_T_82571,		WMP_F_FIBER },
   1061 
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1063 	  "Intel PRO/1000 PB (82571EB)",
   1064 	  WM_T_82571,		WMP_F_SERDES },
   1065 
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1067 	  "Intel PRO/1000 QT (82571EB)",
   1068 	  WM_T_82571,		WMP_F_COPPER },
   1069 
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1071 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1072 	  WM_T_82571,		WMP_F_COPPER, },
   1073 
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1075 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1076 	  WM_T_82571,		WMP_F_COPPER, },
   1077 
   1078 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1079 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1080 	  WM_T_82571,		WMP_F_SERDES, },
   1081 
   1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1083 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1084 	  WM_T_82571,		WMP_F_SERDES, },
   1085 
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1087 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1088 	  WM_T_82571,		WMP_F_FIBER, },
   1089 
   1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1091 	  "Intel i82572EI 1000baseT Ethernet",
   1092 	  WM_T_82572,		WMP_F_COPPER },
   1093 
   1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1095 	  "Intel i82572EI 1000baseX Ethernet",
   1096 	  WM_T_82572,		WMP_F_FIBER },
   1097 
   1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1099 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1100 	  WM_T_82572,		WMP_F_SERDES },
   1101 
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1103 	  "Intel i82572EI 1000baseT Ethernet",
   1104 	  WM_T_82572,		WMP_F_COPPER },
   1105 
   1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1107 	  "Intel i82573E",
   1108 	  WM_T_82573,		WMP_F_COPPER },
   1109 
   1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1111 	  "Intel i82573E IAMT",
   1112 	  WM_T_82573,		WMP_F_COPPER },
   1113 
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1115 	  "Intel i82573L Gigabit Ethernet",
   1116 	  WM_T_82573,		WMP_F_COPPER },
   1117 
   1118 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1119 	  "Intel i82574L",
   1120 	  WM_T_82574,		WMP_F_COPPER },
   1121 
   1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1123 	  "Intel i82574L",
   1124 	  WM_T_82574,		WMP_F_COPPER },
   1125 
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1127 	  "Intel i82583V",
   1128 	  WM_T_82583,		WMP_F_COPPER },
   1129 
   1130 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1131 	  "i80003 dual 1000baseT Ethernet",
   1132 	  WM_T_80003,		WMP_F_COPPER },
   1133 
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1135 	  "i80003 dual 1000baseX Ethernet",
   1136 	  WM_T_80003,		WMP_F_COPPER },
   1137 
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1139 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1140 	  WM_T_80003,		WMP_F_SERDES },
   1141 
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1143 	  "Intel i80003 1000baseT Ethernet",
   1144 	  WM_T_80003,		WMP_F_COPPER },
   1145 
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1147 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1148 	  WM_T_80003,		WMP_F_SERDES },
   1149 
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1151 	  "Intel i82801H (M_AMT) LAN Controller",
   1152 	  WM_T_ICH8,		WMP_F_COPPER },
   1153 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1154 	  "Intel i82801H (AMT) LAN Controller",
   1155 	  WM_T_ICH8,		WMP_F_COPPER },
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1157 	  "Intel i82801H LAN Controller",
   1158 	  WM_T_ICH8,		WMP_F_COPPER },
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1160 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1161 	  WM_T_ICH8,		WMP_F_COPPER },
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1163 	  "Intel i82801H (M) LAN Controller",
   1164 	  WM_T_ICH8,		WMP_F_COPPER },
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1166 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1167 	  WM_T_ICH8,		WMP_F_COPPER },
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1169 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1170 	  WM_T_ICH8,		WMP_F_COPPER },
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1172 	  "82567V-3 LAN Controller",
   1173 	  WM_T_ICH8,		WMP_F_COPPER },
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1175 	  "82801I (AMT) LAN Controller",
   1176 	  WM_T_ICH9,		WMP_F_COPPER },
   1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1178 	  "82801I 10/100 LAN Controller",
   1179 	  WM_T_ICH9,		WMP_F_COPPER },
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1181 	  "82801I (G) 10/100 LAN Controller",
   1182 	  WM_T_ICH9,		WMP_F_COPPER },
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1184 	  "82801I (GT) 10/100 LAN Controller",
   1185 	  WM_T_ICH9,		WMP_F_COPPER },
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1187 	  "82801I (C) LAN Controller",
   1188 	  WM_T_ICH9,		WMP_F_COPPER },
   1189 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1190 	  "82801I mobile LAN Controller",
   1191 	  WM_T_ICH9,		WMP_F_COPPER },
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1193 	  "82801I mobile (V) LAN Controller",
   1194 	  WM_T_ICH9,		WMP_F_COPPER },
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1196 	  "82801I mobile (AMT) LAN Controller",
   1197 	  WM_T_ICH9,		WMP_F_COPPER },
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1199 	  "82567LM-4 LAN Controller",
   1200 	  WM_T_ICH9,		WMP_F_COPPER },
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1202 	  "82567LM-2 LAN Controller",
   1203 	  WM_T_ICH10,		WMP_F_COPPER },
   1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1205 	  "82567LF-2 LAN Controller",
   1206 	  WM_T_ICH10,		WMP_F_COPPER },
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1208 	  "82567LM-3 LAN Controller",
   1209 	  WM_T_ICH10,		WMP_F_COPPER },
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1211 	  "82567LF-3 LAN Controller",
   1212 	  WM_T_ICH10,		WMP_F_COPPER },
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1214 	  "82567V-2 LAN Controller",
   1215 	  WM_T_ICH10,		WMP_F_COPPER },
   1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1217 	  "82567V-3? LAN Controller",
   1218 	  WM_T_ICH10,		WMP_F_COPPER },
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1220 	  "HANKSVILLE LAN Controller",
   1221 	  WM_T_ICH10,		WMP_F_COPPER },
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1223 	  "PCH LAN (82577LM) Controller",
   1224 	  WM_T_PCH,		WMP_F_COPPER },
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1226 	  "PCH LAN (82577LC) Controller",
   1227 	  WM_T_PCH,		WMP_F_COPPER },
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1229 	  "PCH LAN (82578DM) Controller",
   1230 	  WM_T_PCH,		WMP_F_COPPER },
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1232 	  "PCH LAN (82578DC) Controller",
   1233 	  WM_T_PCH,		WMP_F_COPPER },
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1235 	  "PCH2 LAN (82579LM) Controller",
   1236 	  WM_T_PCH2,		WMP_F_COPPER },
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1238 	  "PCH2 LAN (82579V) Controller",
   1239 	  WM_T_PCH2,		WMP_F_COPPER },
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1241 	  "82575EB dual-1000baseT Ethernet",
   1242 	  WM_T_82575,		WMP_F_COPPER },
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1244 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1245 	  WM_T_82575,		WMP_F_SERDES },
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1247 	  "82575GB quad-1000baseT Ethernet",
   1248 	  WM_T_82575,		WMP_F_COPPER },
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1250 	  "82575GB quad-1000baseT Ethernet (PM)",
   1251 	  WM_T_82575,		WMP_F_COPPER },
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1253 	  "82576 1000BaseT Ethernet",
   1254 	  WM_T_82576,		WMP_F_COPPER },
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1256 	  "82576 1000BaseX Ethernet",
   1257 	  WM_T_82576,		WMP_F_FIBER },
   1258 
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1260 	  "82576 gigabit Ethernet (SERDES)",
   1261 	  WM_T_82576,		WMP_F_SERDES },
   1262 
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1264 	  "82576 quad-1000BaseT Ethernet",
   1265 	  WM_T_82576,		WMP_F_COPPER },
   1266 
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1268 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1269 	  WM_T_82576,		WMP_F_COPPER },
   1270 
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1272 	  "82576 gigabit Ethernet",
   1273 	  WM_T_82576,		WMP_F_COPPER },
   1274 
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1276 	  "82576 gigabit Ethernet (SERDES)",
   1277 	  WM_T_82576,		WMP_F_SERDES },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1279 	  "82576 quad-gigabit Ethernet (SERDES)",
   1280 	  WM_T_82576,		WMP_F_SERDES },
   1281 
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1283 	  "82580 1000BaseT Ethernet",
   1284 	  WM_T_82580,		WMP_F_COPPER },
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1286 	  "82580 1000BaseX Ethernet",
   1287 	  WM_T_82580,		WMP_F_FIBER },
   1288 
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1290 	  "82580 1000BaseT Ethernet (SERDES)",
   1291 	  WM_T_82580,		WMP_F_SERDES },
   1292 
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1294 	  "82580 gigabit Ethernet (SGMII)",
   1295 	  WM_T_82580,		WMP_F_COPPER },
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1297 	  "82580 dual-1000BaseT Ethernet",
   1298 	  WM_T_82580,		WMP_F_COPPER },
   1299 
   1300 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1301 	  "82580 quad-1000BaseX Ethernet",
   1302 	  WM_T_82580,		WMP_F_FIBER },
   1303 
   1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1305 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1306 	  WM_T_82580,		WMP_F_COPPER },
   1307 
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1309 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1310 	  WM_T_82580,		WMP_F_SERDES },
   1311 
   1312 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1313 	  "DH89XXCC 1000BASE-KX Ethernet",
   1314 	  WM_T_82580,		WMP_F_SERDES },
   1315 
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1317 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1318 	  WM_T_82580,		WMP_F_SERDES },
   1319 
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1321 	  "I350 Gigabit Network Connection",
   1322 	  WM_T_I350,		WMP_F_COPPER },
   1323 
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1325 	  "I350 Gigabit Fiber Network Connection",
   1326 	  WM_T_I350,		WMP_F_FIBER },
   1327 
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1329 	  "I350 Gigabit Backplane Connection",
   1330 	  WM_T_I350,		WMP_F_SERDES },
   1331 
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1333 	  "I350 Quad Port Gigabit Ethernet",
   1334 	  WM_T_I350,		WMP_F_SERDES },
   1335 
   1336 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1337 	  "I350 Gigabit Connection",
   1338 	  WM_T_I350,		WMP_F_COPPER },
   1339 
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1341 	  "I354 Gigabit Ethernet (KX)",
   1342 	  WM_T_I354,		WMP_F_SERDES },
   1343 
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1345 	  "I354 Gigabit Ethernet (SGMII)",
   1346 	  WM_T_I354,		WMP_F_COPPER },
   1347 
   1348 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1349 	  "I354 Gigabit Ethernet (2.5G)",
   1350 	  WM_T_I354,		WMP_F_COPPER },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1353 	  "I210-T1 Ethernet Server Adapter",
   1354 	  WM_T_I210,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1357 	  "I210 Ethernet (Copper OEM)",
   1358 	  WM_T_I210,		WMP_F_COPPER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1361 	  "I210 Ethernet (Copper IT)",
   1362 	  WM_T_I210,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1365 	  "I210 Ethernet (FLASH less)",
   1366 	  WM_T_I210,		WMP_F_COPPER },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1369 	  "I210 Gigabit Ethernet (Fiber)",
   1370 	  WM_T_I210,		WMP_F_FIBER },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1373 	  "I210 Gigabit Ethernet (SERDES)",
   1374 	  WM_T_I210,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1377 	  "I210 Gigabit Ethernet (FLASH less)",
   1378 	  WM_T_I210,		WMP_F_SERDES },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1381 	  "I210 Gigabit Ethernet (SGMII)",
   1382 	  WM_T_I210,		WMP_F_COPPER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1385 	  "I211 Ethernet (COPPER)",
   1386 	  WM_T_I211,		WMP_F_COPPER },
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1388 	  "I217 V Ethernet Connection",
   1389 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1391 	  "I217 LM Ethernet Connection",
   1392 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1394 	  "I218 V Ethernet Connection",
   1395 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1397 	  "I218 V Ethernet Connection",
   1398 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1399 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1400 	  "I218 V Ethernet Connection",
   1401 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1403 	  "I218 LM Ethernet Connection",
   1404 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1406 	  "I218 LM Ethernet Connection",
   1407 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1409 	  "I218 LM Ethernet Connection",
   1410 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1411 #if 0
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1413 	  "I219 V Ethernet Connection",
   1414 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1415 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1416 	  "I219 V Ethernet Connection",
   1417 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1419 	  "I219 V Ethernet Connection",
   1420 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1422 	  "I219 V Ethernet Connection",
   1423 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1425 	  "I219 LM Ethernet Connection",
   1426 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1427 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1428 	  "I219 LM Ethernet Connection",
   1429 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1431 	  "I219 LM Ethernet Connection",
   1432 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1434 	  "I219 LM Ethernet Connection",
   1435 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1437 	  "I219 LM Ethernet Connection",
   1438 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1439 #endif
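         	/* Sentinel entry; wm_lookup() stops at the NULL name. */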
   1440 	{ 0,			0,
   1441 	  NULL,
   1442 	  0,			0 },
   1443 };
   1444 
   1445 /*
   1446  * Register read/write functions.
   1447  * Other than CSR_{READ|WRITE}().
   1448  */
   1449 
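         /*
          * wm_io_read()/wm_io_write() use the I/O-mapped indirect access
          * window: the target register offset is written at I/O BAR offset 0
          * and the value is then read or written through I/O BAR offset 4.
          */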
   1450 #if 0 /* Not currently used */
   1451 static inline uint32_t
   1452 wm_io_read(struct wm_softc *sc, int reg)
   1453 {
   1454 
   1455 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1456 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1457 }
   1458 #endif
   1459 
   1460 static inline void
   1461 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1462 {
   1463 
   1464 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1465 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1466 }
   1467 
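         /*
          * Write an 8-bit value into one of the 82575's indirect 8-bit
          * controller registers: the data and register offset are packed
          * into a single 32-bit write, after which the READY bit is polled
          * in 5us steps, up to SCTL_CTL_POLL_TIMEOUT iterations, before
          * giving up with a warning.
          */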
   1468 static inline void
   1469 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1470     uint32_t data)
   1471 {
   1472 	uint32_t regval;
   1473 	int i;
   1474 
   1475 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1476 
   1477 	CSR_WRITE(sc, reg, regval);
   1478 
   1479 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1480 		delay(5);
   1481 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1482 			break;
   1483 	}
   1484 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1485 		aprint_error("%s: WARNING:"
   1486 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1487 		    device_xname(sc->sc_dev), reg);
   1488 	}
   1489 }
   1490 
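         /*
          * Store a bus address into a descriptor's address field, split
          * into little-endian low and high 32-bit words; the high word is
          * zero when bus_addr_t is only 32 bits wide.
          */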
   1491 static inline void
   1492 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1493 {
   1494 	wa->wa_low = htole32(v & 0xffffffffU);
   1495 	if (sizeof(bus_addr_t) == 8)
   1496 		wa->wa_high = htole32((uint64_t) v >> 32);
   1497 	else
   1498 		wa->wa_high = 0;
   1499 }
   1500 
   1501 /*
   1502  * Descriptor sync/init functions.
   1503  */
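         /*
          * wm_cdtxsync() syncs "num" Tx descriptors starting at "start" and
          * splits the bus_dmamap_sync() in two when the range wraps around
          * the end of the ring: e.g. with a 256-entry ring, (start = 252,
          * num = 8) becomes one sync over entries 252-255 followed by a
          * second one over entries 0-3.
          */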
   1504 static inline void
   1505 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1506 {
   1507 	struct wm_softc *sc = txq->txq_sc;
   1508 
   1509 	/* If it will wrap around, sync to the end of the ring. */
   1510 	if ((start + num) > WM_NTXDESC(txq)) {
   1511 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1512 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1513 		    (WM_NTXDESC(txq) - start), ops);
   1514 		num -= (WM_NTXDESC(txq) - start);
   1515 		start = 0;
   1516 	}
   1517 
   1518 	/* Now sync whatever is left. */
   1519 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1520 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1521 }
   1522 
   1523 static inline void
   1524 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1525 {
   1526 	struct wm_softc *sc = rxq->rxq_sc;
   1527 
   1528 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1529 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1530 }
   1531 
   1532 static inline void
   1533 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1534 {
   1535 	struct wm_softc *sc = rxq->rxq_sc;
   1536 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1537 	struct mbuf *m = rxs->rxs_mbuf;
   1538 
   1539 	/*
   1540 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1541 	 * so that the payload after the Ethernet header is aligned
   1542 	 * to a 4-byte boundary.
    1543 	 *
   1544 	 * XXX BRAINDAMAGE ALERT!
   1545 	 * The stupid chip uses the same size for every buffer, which
   1546 	 * is set in the Receive Control register.  We are using the 2K
   1547 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1548 	 * reason, we can't "scoot" packets longer than the standard
   1549 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1550 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1551 	 * the upper layer copy the headers.
   1552 	 */
   1553 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1554 
   1555 	if (sc->sc_type == WM_T_82574) {
   1556 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1557 		rxd->erx_data.erxd_addr =
   1558 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1559 		rxd->erx_data.erxd_dd = 0;
   1560 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1561 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1562 
   1563 		rxd->nqrx_data.nrxd_paddr =
   1564 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1565 		/* Currently, split header is not supported. */
   1566 		rxd->nqrx_data.nrxd_haddr = 0;
   1567 	} else {
   1568 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1569 
   1570 		wm_set_dma_addr(&rxd->wrx_addr,
   1571 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1572 		rxd->wrx_len = 0;
   1573 		rxd->wrx_cksum = 0;
   1574 		rxd->wrx_status = 0;
   1575 		rxd->wrx_errors = 0;
   1576 		rxd->wrx_special = 0;
   1577 	}
   1578 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1579 
   1580 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
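         	/* Advancing the tail (RDT) hands the slot back to the chip. */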
   1581 }
   1582 
   1583 /*
   1584  * Device driver interface functions and commonly used functions.
   1585  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1586  */
   1587 
   1588 /* Lookup supported device table */
   1589 static const struct wm_product *
   1590 wm_lookup(const struct pci_attach_args *pa)
   1591 {
   1592 	const struct wm_product *wmp;
   1593 
   1594 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1595 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1596 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1597 			return wmp;
   1598 	}
   1599 	return NULL;
   1600 }
   1601 
   1602 /* The match function (ca_match) */
   1603 static int
   1604 wm_match(device_t parent, cfdata_t cf, void *aux)
   1605 {
   1606 	struct pci_attach_args *pa = aux;
   1607 
   1608 	if (wm_lookup(pa) != NULL)
   1609 		return 1;
   1610 
   1611 	return 0;
   1612 }
   1613 
   1614 /* The attach function (ca_attach) */
   1615 static void
   1616 wm_attach(device_t parent, device_t self, void *aux)
   1617 {
   1618 	struct wm_softc *sc = device_private(self);
   1619 	struct pci_attach_args *pa = aux;
   1620 	prop_dictionary_t dict;
   1621 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1622 	pci_chipset_tag_t pc = pa->pa_pc;
   1623 	int counts[PCI_INTR_TYPE_SIZE];
   1624 	pci_intr_type_t max_type;
   1625 	const char *eetype, *xname;
   1626 	bus_space_tag_t memt;
   1627 	bus_space_handle_t memh;
   1628 	bus_size_t memsize;
   1629 	int memh_valid;
   1630 	int i, error;
   1631 	const struct wm_product *wmp;
   1632 	prop_data_t ea;
   1633 	prop_number_t pn;
   1634 	uint8_t enaddr[ETHER_ADDR_LEN];
   1635 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1636 	pcireg_t preg, memtype;
   1637 	uint16_t eeprom_data, apme_mask;
   1638 	bool force_clear_smbi;
   1639 	uint32_t link_mode;
   1640 	uint32_t reg;
   1641 	void (*deferred_start_func)(struct ifnet *) = NULL;
   1642 
   1643 	sc->sc_dev = self;
   1644 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1645 	sc->sc_core_stopping = false;
   1646 
   1647 	wmp = wm_lookup(pa);
   1648 #ifdef DIAGNOSTIC
   1649 	if (wmp == NULL) {
   1650 		printf("\n");
   1651 		panic("wm_attach: impossible");
   1652 	}
   1653 #endif
   1654 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1655 
   1656 	sc->sc_pc = pa->pa_pc;
   1657 	sc->sc_pcitag = pa->pa_tag;
   1658 
   1659 	if (pci_dma64_available(pa))
   1660 		sc->sc_dmat = pa->pa_dmat64;
   1661 	else
   1662 		sc->sc_dmat = pa->pa_dmat;
   1663 
   1664 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1665 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1666 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1667 
   1668 	sc->sc_type = wmp->wmp_type;
   1669 
   1670 	/* Set default function pointers */
   1671 	sc->phy.acquire = wm_get_null;
   1672 	sc->phy.release = wm_put_null;
   1673 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1674 
   1675 	if (sc->sc_type < WM_T_82543) {
   1676 		if (sc->sc_rev < 2) {
   1677 			aprint_error_dev(sc->sc_dev,
   1678 			    "i82542 must be at least rev. 2\n");
   1679 			return;
   1680 		}
   1681 		if (sc->sc_rev < 3)
   1682 			sc->sc_type = WM_T_82542_2_0;
   1683 	}
   1684 
   1685 	/*
   1686 	 * Disable MSI for Errata:
   1687 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1688 	 *
   1689 	 *  82544: Errata 25
   1690 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1691 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1692 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1693 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1694 	 *
   1695 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1696 	 *
   1697 	 *  82571 & 82572: Errata 63
   1698 	 */
   1699 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1700 	    || (sc->sc_type == WM_T_82572))
   1701 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1702 
   1703 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1704 	    || (sc->sc_type == WM_T_82580)
   1705 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1706 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1707 		sc->sc_flags |= WM_F_NEWQUEUE;
   1708 
   1709 	/* Set device properties (mactype) */
   1710 	dict = device_properties(sc->sc_dev);
   1711 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1712 
   1713 	/*
    1714 	 * Map the device.  All devices support memory-mapped access,
   1715 	 * and it is really required for normal operation.
   1716 	 */
   1717 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1718 	switch (memtype) {
   1719 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1720 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1721 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1722 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1723 		break;
   1724 	default:
   1725 		memh_valid = 0;
   1726 		break;
   1727 	}
   1728 
   1729 	if (memh_valid) {
   1730 		sc->sc_st = memt;
   1731 		sc->sc_sh = memh;
   1732 		sc->sc_ss = memsize;
   1733 	} else {
   1734 		aprint_error_dev(sc->sc_dev,
   1735 		    "unable to map device registers\n");
   1736 		return;
   1737 	}
   1738 
   1739 	/*
   1740 	 * In addition, i82544 and later support I/O mapped indirect
   1741 	 * register access.  It is not desirable (nor supported in
   1742 	 * this driver) to use it for normal operation, though it is
   1743 	 * required to work around bugs in some chip versions.
   1744 	 */
   1745 	if (sc->sc_type >= WM_T_82544) {
   1746 		/* First we have to find the I/O BAR. */
   1747 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1748 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1749 			if (memtype == PCI_MAPREG_TYPE_IO)
   1750 				break;
   1751 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1752 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1753 				i += 4;	/* skip high bits, too */
   1754 		}
   1755 		if (i < PCI_MAPREG_END) {
   1756 			/*
    1757 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1758 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1759 			 * That's no problem, because those newer chips don't
    1760 			 * have this bug.
    1761 			 *
    1762 			 * The i8254x apparently doesn't respond when the
    1763 			 * I/O BAR is 0, which looks as if it hasn't been
    1764 			 * configured.
   1765 			 */
   1766 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1767 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1768 				aprint_error_dev(sc->sc_dev,
   1769 				    "WARNING: I/O BAR at zero.\n");
   1770 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1771 					0, &sc->sc_iot, &sc->sc_ioh,
   1772 					NULL, &sc->sc_ios) == 0) {
   1773 				sc->sc_flags |= WM_F_IOH_VALID;
   1774 			} else {
   1775 				aprint_error_dev(sc->sc_dev,
   1776 				    "WARNING: unable to map I/O space\n");
   1777 			}
   1778 		}
   1779 
   1780 	}
   1781 
   1782 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1783 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1784 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1785 	if (sc->sc_type < WM_T_82542_2_1)
   1786 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1787 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1788 
   1789 	/* power up chip */
   1790 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1791 	    NULL)) && error != EOPNOTSUPP) {
   1792 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1793 		return;
   1794 	}
   1795 
   1796 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1797 
   1798 	/* Allocation settings */
   1799 	max_type = PCI_INTR_TYPE_MSIX;
   1800 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1801 	counts[PCI_INTR_TYPE_MSI] = 1;
   1802 	counts[PCI_INTR_TYPE_INTX] = 1;
   1803 
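         	/*
         	 * Interrupt allocation fallback ladder: first try MSI-X with
         	 * sc_nqueues + 1 vectors (one per queue, plus one for link
         	 * interrupts); if that fails to set up, release the vectors
         	 * and retry with a single MSI vector, then with legacy INTx.
         	 */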
   1804 alloc_retry:
   1805 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1806 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1807 		return;
   1808 	}
   1809 
   1810 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1811 		error = wm_setup_msix(sc);
   1812 		if (error) {
   1813 			pci_intr_release(pc, sc->sc_intrs,
   1814 			    counts[PCI_INTR_TYPE_MSIX]);
   1815 
   1816 			/* Setup for MSI: Disable MSI-X */
   1817 			max_type = PCI_INTR_TYPE_MSI;
   1818 			counts[PCI_INTR_TYPE_MSI] = 1;
   1819 			counts[PCI_INTR_TYPE_INTX] = 1;
   1820 			goto alloc_retry;
   1821 		}
    1822 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1823 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1824 		error = wm_setup_legacy(sc);
   1825 		if (error) {
   1826 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1827 			    counts[PCI_INTR_TYPE_MSI]);
   1828 
   1829 			/* The next try is for INTx: Disable MSI */
   1830 			max_type = PCI_INTR_TYPE_INTX;
   1831 			counts[PCI_INTR_TYPE_INTX] = 1;
   1832 			goto alloc_retry;
   1833 		}
   1834 	} else {
   1835 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1836 		error = wm_setup_legacy(sc);
   1837 		if (error) {
   1838 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1839 			    counts[PCI_INTR_TYPE_INTX]);
   1840 			return;
   1841 		}
   1842 	}
   1843 
    1844 	/*
    1845 	 * Check the function ID (i.e. which LAN port of a multi-port
         	 * chip this is); it's used later to pick per-port NVM words.
    1846 	 */
   1847 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1848 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1849 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1850 	    || (sc->sc_type == WM_T_82580)
   1851 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1852 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1853 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1854 	else
   1855 		sc->sc_funcid = 0;
   1856 
   1857 	/*
   1858 	 * Determine a few things about the bus we're connected to.
   1859 	 */
   1860 	if (sc->sc_type < WM_T_82543) {
   1861 		/* We don't really know the bus characteristics here. */
   1862 		sc->sc_bus_speed = 33;
   1863 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1864 		/*
    1865 		 * CSA (Communication Streaming Architecture) is about as fast
    1866 		 * as a 32-bit 66MHz PCI bus.
   1867 		 */
   1868 		sc->sc_flags |= WM_F_CSA;
   1869 		sc->sc_bus_speed = 66;
   1870 		aprint_verbose_dev(sc->sc_dev,
   1871 		    "Communication Streaming Architecture\n");
   1872 		if (sc->sc_type == WM_T_82547) {
   1873 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1874 			callout_setfunc(&sc->sc_txfifo_ch,
   1875 					wm_82547_txfifo_stall, sc);
   1876 			aprint_verbose_dev(sc->sc_dev,
   1877 			    "using 82547 Tx FIFO stall work-around\n");
   1878 		}
   1879 	} else if (sc->sc_type >= WM_T_82571) {
   1880 		sc->sc_flags |= WM_F_PCIE;
   1881 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1882 		    && (sc->sc_type != WM_T_ICH10)
   1883 		    && (sc->sc_type != WM_T_PCH)
   1884 		    && (sc->sc_type != WM_T_PCH2)
   1885 		    && (sc->sc_type != WM_T_PCH_LPT)
   1886 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1887 			/* ICH* and PCH* have no PCIe capability registers */
   1888 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1889 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1890 				NULL) == 0)
   1891 				aprint_error_dev(sc->sc_dev,
   1892 				    "unable to find PCIe capability\n");
   1893 		}
   1894 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1895 	} else {
   1896 		reg = CSR_READ(sc, WMREG_STATUS);
   1897 		if (reg & STATUS_BUS64)
   1898 			sc->sc_flags |= WM_F_BUS64;
   1899 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1900 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1901 
   1902 			sc->sc_flags |= WM_F_PCIX;
   1903 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1904 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1905 				aprint_error_dev(sc->sc_dev,
   1906 				    "unable to find PCIX capability\n");
   1907 			else if (sc->sc_type != WM_T_82545_3 &&
   1908 				 sc->sc_type != WM_T_82546_3) {
   1909 				/*
   1910 				 * Work around a problem caused by the BIOS
   1911 				 * setting the max memory read byte count
   1912 				 * incorrectly.
   1913 				 */
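         				/*
         				 * The MMRBC fields encode the burst
         				 * size as 512 << n bytes (512, 1024,
         				 * 2048 or 4096); if the command
         				 * register asks for more than the
         				 * advertised maximum (MAXB), clamp
         				 * it down to MAXB.
         				 */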
   1914 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1915 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1916 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1917 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1918 
   1919 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1920 				    PCIX_CMD_BYTECNT_SHIFT;
   1921 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1922 				    PCIX_STATUS_MAXB_SHIFT;
   1923 				if (bytecnt > maxb) {
   1924 					aprint_verbose_dev(sc->sc_dev,
   1925 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1926 					    512 << bytecnt, 512 << maxb);
   1927 					pcix_cmd = (pcix_cmd &
   1928 					    ~PCIX_CMD_BYTECNT_MASK) |
   1929 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1930 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1931 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1932 					    pcix_cmd);
   1933 				}
   1934 			}
   1935 		}
   1936 		/*
   1937 		 * The quad port adapter is special; it has a PCIX-PCIX
   1938 		 * bridge on the board, and can run the secondary bus at
   1939 		 * a higher speed.
   1940 		 */
   1941 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1942 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1943 								      : 66;
   1944 		} else if (sc->sc_flags & WM_F_PCIX) {
   1945 			switch (reg & STATUS_PCIXSPD_MASK) {
   1946 			case STATUS_PCIXSPD_50_66:
   1947 				sc->sc_bus_speed = 66;
   1948 				break;
   1949 			case STATUS_PCIXSPD_66_100:
   1950 				sc->sc_bus_speed = 100;
   1951 				break;
   1952 			case STATUS_PCIXSPD_100_133:
   1953 				sc->sc_bus_speed = 133;
   1954 				break;
   1955 			default:
   1956 				aprint_error_dev(sc->sc_dev,
   1957 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1958 				    reg & STATUS_PCIXSPD_MASK);
   1959 				sc->sc_bus_speed = 66;
   1960 				break;
   1961 			}
   1962 		} else
   1963 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1964 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1965 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1966 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1967 	}
   1968 
   1969 	/* clear interesting stat counters */
   1970 	CSR_READ(sc, WMREG_COLC);
   1971 	CSR_READ(sc, WMREG_RXERRC);
   1972 
   1973 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1974 	    || (sc->sc_type >= WM_T_ICH8))
   1975 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1976 	if (sc->sc_type >= WM_T_ICH8)
   1977 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1978 
   1979 	/* Set PHY, NVM mutex related stuff */
   1980 	switch (sc->sc_type) {
   1981 	case WM_T_82542_2_0:
   1982 	case WM_T_82542_2_1:
   1983 	case WM_T_82543:
   1984 	case WM_T_82544:
   1985 		/* Microwire */
   1986 		sc->sc_nvm_wordsize = 64;
   1987 		sc->sc_nvm_addrbits = 6;
   1988 		break;
   1989 	case WM_T_82540:
   1990 	case WM_T_82545:
   1991 	case WM_T_82545_3:
   1992 	case WM_T_82546:
   1993 	case WM_T_82546_3:
   1994 		/* Microwire */
   1995 		reg = CSR_READ(sc, WMREG_EECD);
   1996 		if (reg & EECD_EE_SIZE) {
   1997 			sc->sc_nvm_wordsize = 256;
   1998 			sc->sc_nvm_addrbits = 8;
   1999 		} else {
   2000 			sc->sc_nvm_wordsize = 64;
   2001 			sc->sc_nvm_addrbits = 6;
   2002 		}
   2003 		sc->sc_flags |= WM_F_LOCK_EECD;
   2004 		break;
   2005 	case WM_T_82541:
   2006 	case WM_T_82541_2:
   2007 	case WM_T_82547:
   2008 	case WM_T_82547_2:
   2009 		sc->sc_flags |= WM_F_LOCK_EECD;
   2010 		reg = CSR_READ(sc, WMREG_EECD);
   2011 		if (reg & EECD_EE_TYPE) {
   2012 			/* SPI */
   2013 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2014 			wm_nvm_set_addrbits_size_eecd(sc);
   2015 		} else {
   2016 			/* Microwire */
   2017 			if ((reg & EECD_EE_ABITS) != 0) {
   2018 				sc->sc_nvm_wordsize = 256;
   2019 				sc->sc_nvm_addrbits = 8;
   2020 			} else {
   2021 				sc->sc_nvm_wordsize = 64;
   2022 				sc->sc_nvm_addrbits = 6;
   2023 			}
   2024 		}
   2025 		break;
   2026 	case WM_T_82571:
   2027 	case WM_T_82572:
   2028 		/* SPI */
   2029 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2030 		wm_nvm_set_addrbits_size_eecd(sc);
   2031 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2032 		sc->phy.acquire = wm_get_swsm_semaphore;
   2033 		sc->phy.release = wm_put_swsm_semaphore;
   2034 		break;
   2035 	case WM_T_82573:
   2036 	case WM_T_82574:
   2037 	case WM_T_82583:
   2038 		if (sc->sc_type == WM_T_82573) {
   2039 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2040 			sc->phy.acquire = wm_get_swsm_semaphore;
   2041 			sc->phy.release = wm_put_swsm_semaphore;
   2042 		} else {
   2043 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2044 			/* Both PHY and NVM use the same semaphore. */
   2045 			sc->phy.acquire
   2046 			    = wm_get_swfwhw_semaphore;
   2047 			sc->phy.release
   2048 			    = wm_put_swfwhw_semaphore;
   2049 		}
   2050 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2051 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2052 			sc->sc_nvm_wordsize = 2048;
   2053 		} else {
   2054 			/* SPI */
   2055 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2056 			wm_nvm_set_addrbits_size_eecd(sc);
   2057 		}
   2058 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2059 		break;
   2060 	case WM_T_82575:
   2061 	case WM_T_82576:
   2062 	case WM_T_82580:
   2063 	case WM_T_I350:
   2064 	case WM_T_I354:
   2065 	case WM_T_80003:
   2066 		/* SPI */
   2067 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2068 		wm_nvm_set_addrbits_size_eecd(sc);
   2069 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2070 		    | WM_F_LOCK_SWSM;
   2071 		sc->phy.acquire = wm_get_phy_82575;
   2072 		sc->phy.release = wm_put_phy_82575;
   2073 		break;
   2074 	case WM_T_ICH8:
   2075 	case WM_T_ICH9:
   2076 	case WM_T_ICH10:
   2077 	case WM_T_PCH:
   2078 	case WM_T_PCH2:
   2079 	case WM_T_PCH_LPT:
   2080 		/* FLASH */
   2081 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2082 		sc->sc_nvm_wordsize = 2048;
   2083 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2084 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2085 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2086 			aprint_error_dev(sc->sc_dev,
   2087 			    "can't map FLASH registers\n");
   2088 			goto out;
   2089 		}
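         		/*
         		 * GFPREG holds the NVM region's base (low half) and
         		 * limit (bits 16 and up) in flash-sector units.  The
         		 * bank size computed below is (limit + 1 - base)
         		 * sectors converted to bytes, then halved for the two
         		 * banks and halved again to count 16-bit words.
         		 */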
   2090 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2091 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2092 		    ICH_FLASH_SECTOR_SIZE;
   2093 		sc->sc_ich8_flash_bank_size =
   2094 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2095 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2096 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2097 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2098 		sc->sc_flashreg_offset = 0;
   2099 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2100 		sc->phy.release = wm_put_swflag_ich8lan;
   2101 		break;
   2102 	case WM_T_PCH_SPT:
   2103 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2104 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2105 		sc->sc_flasht = sc->sc_st;
   2106 		sc->sc_flashh = sc->sc_sh;
   2107 		sc->sc_ich8_flash_base = 0;
   2108 		sc->sc_nvm_wordsize =
   2109 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2110 			* NVM_SIZE_MULTIPLIER;
    2111 		/* The value is in bytes; we want words */
   2112 		sc->sc_nvm_wordsize /= 2;
   2113 		/* assume 2 banks */
   2114 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2115 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2116 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2117 		sc->phy.release = wm_put_swflag_ich8lan;
   2118 		break;
   2119 	case WM_T_I210:
   2120 	case WM_T_I211:
   2121 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2122 			wm_nvm_set_addrbits_size_eecd(sc);
   2123 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2124 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2125 		} else {
   2126 			sc->sc_nvm_wordsize = INVM_SIZE;
   2127 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2128 		}
   2129 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2130 		sc->phy.acquire = wm_get_phy_82575;
   2131 		sc->phy.release = wm_put_phy_82575;
   2132 		break;
   2133 	default:
   2134 		break;
   2135 	}
   2136 
   2137 	/* Reset the chip to a known state. */
   2138 	wm_reset(sc);
   2139 
   2140 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2141 	switch (sc->sc_type) {
   2142 	case WM_T_82571:
   2143 	case WM_T_82572:
   2144 		reg = CSR_READ(sc, WMREG_SWSM2);
   2145 		if ((reg & SWSM2_LOCK) == 0) {
   2146 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2147 			force_clear_smbi = true;
   2148 		} else
   2149 			force_clear_smbi = false;
   2150 		break;
   2151 	case WM_T_82573:
   2152 	case WM_T_82574:
   2153 	case WM_T_82583:
   2154 		force_clear_smbi = true;
   2155 		break;
   2156 	default:
   2157 		force_clear_smbi = false;
   2158 		break;
   2159 	}
   2160 	if (force_clear_smbi) {
   2161 		reg = CSR_READ(sc, WMREG_SWSM);
   2162 		if ((reg & SWSM_SMBI) != 0)
   2163 			aprint_error_dev(sc->sc_dev,
   2164 			    "Please update the Bootagent\n");
   2165 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2166 	}
   2167 
   2168 	/*
    2169 	 * Defer printing the EEPROM type until after verifying the checksum.
   2170 	 * This allows the EEPROM type to be printed correctly in the case
   2171 	 * that no EEPROM is attached.
   2172 	 */
   2173 	/*
   2174 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2175 	 * this for later, so we can fail future reads from the EEPROM.
   2176 	 */
   2177 	if (wm_nvm_validate_checksum(sc)) {
   2178 		/*
   2179 		 * Read twice again because some PCI-e parts fail the
   2180 		 * first check due to the link being in sleep state.
   2181 		 */
   2182 		if (wm_nvm_validate_checksum(sc))
   2183 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2184 	}
   2185 
   2186 	/* Set device properties (macflags) */
   2187 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2188 
   2189 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2190 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2191 	else {
   2192 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2193 		    sc->sc_nvm_wordsize);
   2194 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2195 			aprint_verbose("iNVM");
   2196 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2197 			aprint_verbose("FLASH(HW)");
   2198 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2199 			aprint_verbose("FLASH");
   2200 		else {
   2201 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2202 				eetype = "SPI";
   2203 			else
   2204 				eetype = "MicroWire";
   2205 			aprint_verbose("(%d address bits) %s EEPROM",
   2206 			    sc->sc_nvm_addrbits, eetype);
   2207 		}
   2208 	}
   2209 	wm_nvm_version(sc);
   2210 	aprint_verbose("\n");
   2211 
   2212 	/* Check for I21[01] PLL workaround */
   2213 	if (sc->sc_type == WM_T_I210)
   2214 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2215 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2216 		/* NVM image release 3.25 has a workaround */
   2217 		if ((sc->sc_nvm_ver_major < 3)
   2218 		    || ((sc->sc_nvm_ver_major == 3)
   2219 			&& (sc->sc_nvm_ver_minor < 25))) {
   2220 			aprint_verbose_dev(sc->sc_dev,
   2221 			    "ROM image version %d.%d is older than 3.25\n",
   2222 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2223 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2224 		}
   2225 	}
   2226 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2227 		wm_pll_workaround_i210(sc);
   2228 
   2229 	wm_get_wakeup(sc);
   2230 
   2231 	/* Non-AMT based hardware can now take control from firmware */
   2232 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2233 		wm_get_hw_control(sc);
   2234 
   2235 	/*
   2236 	 * Read the Ethernet address from the EEPROM, if not first found
   2237 	 * in device properties.
   2238 	 */
   2239 	ea = prop_dictionary_get(dict, "mac-address");
   2240 	if (ea != NULL) {
   2241 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2242 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2243 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2244 	} else {
   2245 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2246 			aprint_error_dev(sc->sc_dev,
   2247 			    "unable to read Ethernet address\n");
   2248 			goto out;
   2249 		}
   2250 	}
   2251 
   2252 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2253 	    ether_sprintf(enaddr));
   2254 
   2255 	/*
   2256 	 * Read the config info from the EEPROM, and set up various
   2257 	 * bits in the control registers based on their contents.
   2258 	 */
   2259 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2260 	if (pn != NULL) {
   2261 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2262 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2263 	} else {
   2264 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2265 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2266 			goto out;
   2267 		}
   2268 	}
   2269 
   2270 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2271 	if (pn != NULL) {
   2272 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2273 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2274 	} else {
   2275 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2276 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2277 			goto out;
   2278 		}
   2279 	}
   2280 
   2281 	/* check for WM_F_WOL */
   2282 	switch (sc->sc_type) {
   2283 	case WM_T_82542_2_0:
   2284 	case WM_T_82542_2_1:
   2285 	case WM_T_82543:
   2286 		/* dummy? */
   2287 		eeprom_data = 0;
   2288 		apme_mask = NVM_CFG3_APME;
   2289 		break;
   2290 	case WM_T_82544:
   2291 		apme_mask = NVM_CFG2_82544_APM_EN;
   2292 		eeprom_data = cfg2;
   2293 		break;
   2294 	case WM_T_82546:
   2295 	case WM_T_82546_3:
   2296 	case WM_T_82571:
   2297 	case WM_T_82572:
   2298 	case WM_T_82573:
   2299 	case WM_T_82574:
   2300 	case WM_T_82583:
   2301 	case WM_T_80003:
   2302 	default:
   2303 		apme_mask = NVM_CFG3_APME;
   2304 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2305 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2306 		break;
   2307 	case WM_T_82575:
   2308 	case WM_T_82576:
   2309 	case WM_T_82580:
   2310 	case WM_T_I350:
   2311 	case WM_T_I354: /* XXX ok? */
   2312 	case WM_T_ICH8:
   2313 	case WM_T_ICH9:
   2314 	case WM_T_ICH10:
   2315 	case WM_T_PCH:
   2316 	case WM_T_PCH2:
   2317 	case WM_T_PCH_LPT:
   2318 	case WM_T_PCH_SPT:
   2319 		/* XXX The funcid should be checked on some devices */
   2320 		apme_mask = WUC_APME;
   2321 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2322 		break;
   2323 	}
   2324 
   2325 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2326 	if ((eeprom_data & apme_mask) != 0)
   2327 		sc->sc_flags |= WM_F_WOL;
   2328 #ifdef WM_DEBUG
   2329 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2330 		printf("WOL\n");
   2331 #endif
   2332 
   2333 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2334 		/* Check NVM for autonegotiation */
   2335 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2336 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2337 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2338 		}
   2339 	}
   2340 
   2341 	/*
    2342 	 * XXX need special handling for some multi-port cards
    2343 	 * to disable a particular port.
   2344 	 */
   2345 
   2346 	if (sc->sc_type >= WM_T_82544) {
   2347 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2348 		if (pn != NULL) {
   2349 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2350 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2351 		} else {
   2352 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2353 				aprint_error_dev(sc->sc_dev,
   2354 				    "unable to read SWDPIN\n");
   2355 				goto out;
   2356 			}
   2357 		}
   2358 	}
   2359 
   2360 	if (cfg1 & NVM_CFG1_ILOS)
   2361 		sc->sc_ctrl |= CTRL_ILOS;
   2362 
   2363 	/*
   2364 	 * XXX
    2365 	 * This code isn't correct, because pins 2 and 3 are located
    2366 	 * at different positions on newer chips. Check all the datasheets.
    2367 	 *
    2368 	 * Until this is resolved, only do it for chips up to the 82580.
   2369 	 */
   2370 	if (sc->sc_type <= WM_T_82580) {
   2371 		if (sc->sc_type >= WM_T_82544) {
   2372 			sc->sc_ctrl |=
   2373 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2374 			    CTRL_SWDPIO_SHIFT;
   2375 			sc->sc_ctrl |=
   2376 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2377 			    CTRL_SWDPINS_SHIFT;
   2378 		} else {
   2379 			sc->sc_ctrl |=
   2380 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2381 			    CTRL_SWDPIO_SHIFT;
   2382 		}
   2383 	}
   2384 
   2385 	/* XXX For other than 82580? */
   2386 	if (sc->sc_type == WM_T_82580) {
   2387 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2388 		if (nvmword & __BIT(13))
   2389 			sc->sc_ctrl |= CTRL_ILOS;
   2390 	}
   2391 
   2392 #if 0
   2393 	if (sc->sc_type >= WM_T_82544) {
   2394 		if (cfg1 & NVM_CFG1_IPS0)
   2395 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2396 		if (cfg1 & NVM_CFG1_IPS1)
   2397 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2398 		sc->sc_ctrl_ext |=
   2399 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2400 		    CTRL_EXT_SWDPIO_SHIFT;
   2401 		sc->sc_ctrl_ext |=
   2402 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2403 		    CTRL_EXT_SWDPINS_SHIFT;
   2404 	} else {
   2405 		sc->sc_ctrl_ext |=
   2406 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2407 		    CTRL_EXT_SWDPIO_SHIFT;
   2408 	}
   2409 #endif
   2410 
   2411 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2412 #if 0
   2413 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2414 #endif
   2415 
   2416 	if (sc->sc_type == WM_T_PCH) {
   2417 		uint16_t val;
   2418 
   2419 		/* Save the NVM K1 bit setting */
   2420 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2421 
   2422 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2423 			sc->sc_nvm_k1_enabled = 1;
   2424 		else
   2425 			sc->sc_nvm_k1_enabled = 0;
   2426 	}
   2427 
   2428 	/*
    2429 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2430 	 * media structures accordingly.
   2431 	 */
   2432 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2433 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2434 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2435 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2436 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2437 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2438 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2439 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2440 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2441 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2442 	    || (sc->sc_type == WM_T_I211)) {
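         		/*
         		 * On 82575 and newer, the link mode field in CTRL_EXT
         		 * selects among the internal copper PHY (GMII), SGMII,
         		 * SerDes/SFP and 1000BASE-KX attachments; map it onto
         		 * the driver's media type below.
         		 */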
   2443 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2444 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2445 		switch (link_mode) {
   2446 		case CTRL_EXT_LINK_MODE_1000KX:
   2447 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2448 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2449 			break;
   2450 		case CTRL_EXT_LINK_MODE_SGMII:
   2451 			if (wm_sgmii_uses_mdio(sc)) {
   2452 				aprint_verbose_dev(sc->sc_dev,
   2453 				    "SGMII(MDIO)\n");
   2454 				sc->sc_flags |= WM_F_SGMII;
   2455 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2456 				break;
   2457 			}
   2458 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2459 			/*FALLTHROUGH*/
   2460 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2461 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2462 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2463 				if (link_mode
   2464 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2465 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2466 					sc->sc_flags |= WM_F_SGMII;
   2467 				} else {
   2468 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2469 					aprint_verbose_dev(sc->sc_dev,
   2470 					    "SERDES\n");
   2471 				}
   2472 				break;
   2473 			}
   2474 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2475 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2476 
   2477 			/* Change current link mode setting */
   2478 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2479 			switch (sc->sc_mediatype) {
   2480 			case WM_MEDIATYPE_COPPER:
   2481 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2482 				break;
   2483 			case WM_MEDIATYPE_SERDES:
   2484 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2485 				break;
   2486 			default:
   2487 				break;
   2488 			}
   2489 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2490 			break;
   2491 		case CTRL_EXT_LINK_MODE_GMII:
   2492 		default:
   2493 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2494 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2495 			break;
   2496 		}
   2497 
    2498 		/* Enable the I2C interface only when in SGMII mode. */
    2499 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2500 			reg |= CTRL_EXT_I2C_ENA;
    2501 		else
    2502 			reg &= ~CTRL_EXT_I2C_ENA;
   2503 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2504 
   2505 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2506 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2507 		else
   2508 			wm_tbi_mediainit(sc);
   2509 	} else if (sc->sc_type < WM_T_82543 ||
   2510 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2511 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2512 			aprint_error_dev(sc->sc_dev,
   2513 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2514 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2515 		}
   2516 		wm_tbi_mediainit(sc);
   2517 	} else {
   2518 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2519 			aprint_error_dev(sc->sc_dev,
   2520 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2521 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2522 		}
   2523 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2524 	}
   2525 
   2526 	ifp = &sc->sc_ethercom.ec_if;
   2527 	xname = device_xname(sc->sc_dev);
   2528 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2529 	ifp->if_softc = sc;
   2530 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2531 	ifp->if_extflags = IFEF_START_MPSAFE;
   2532 	ifp->if_ioctl = wm_ioctl;
   2533 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2534 		ifp->if_start = wm_nq_start;
   2535 		if (sc->sc_nqueues > 1) {
   2536 			ifp->if_transmit = wm_nq_transmit;
   2537 			deferred_start_func = wm_deferred_start;
   2538 		}
   2539 	} else {
   2540 		ifp->if_start = wm_start;
   2541 		if (sc->sc_nqueues > 1) {
   2542 			ifp->if_transmit = wm_transmit;
   2543 			deferred_start_func = wm_deferred_start;
   2544 		}
   2545 	}
   2546 	ifp->if_watchdog = wm_watchdog;
   2547 	ifp->if_init = wm_init;
   2548 	ifp->if_stop = wm_stop;
   2549 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2550 	IFQ_SET_READY(&ifp->if_snd);
   2551 
   2552 	/* Check for jumbo frame */
   2553 	switch (sc->sc_type) {
   2554 	case WM_T_82573:
   2555 		/* XXX limited to 9234 if ASPM is disabled */
   2556 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2557 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2558 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2559 		break;
   2560 	case WM_T_82571:
   2561 	case WM_T_82572:
   2562 	case WM_T_82574:
   2563 	case WM_T_82575:
   2564 	case WM_T_82576:
   2565 	case WM_T_82580:
   2566 	case WM_T_I350:
    2567 	case WM_T_I354: /* XXX ok? */
   2568 	case WM_T_I210:
   2569 	case WM_T_I211:
   2570 	case WM_T_80003:
   2571 	case WM_T_ICH9:
   2572 	case WM_T_ICH10:
   2573 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2574 	case WM_T_PCH_LPT:
   2575 	case WM_T_PCH_SPT:
   2576 		/* XXX limited to 9234 */
   2577 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2578 		break;
   2579 	case WM_T_PCH:
   2580 		/* XXX limited to 4096 */
   2581 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2582 		break;
   2583 	case WM_T_82542_2_0:
   2584 	case WM_T_82542_2_1:
   2585 	case WM_T_82583:
   2586 	case WM_T_ICH8:
   2587 		/* No support for jumbo frame */
   2588 		break;
   2589 	default:
   2590 		/* ETHER_MAX_LEN_JUMBO */
   2591 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2592 		break;
   2593 	}
   2594 
    2595 	/* If we're an i82543 or greater, we can support VLANs. */
   2596 	if (sc->sc_type >= WM_T_82543)
   2597 		sc->sc_ethercom.ec_capabilities |=
   2598 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2599 
   2600 	/*
    2601 	 * We can perform TCPv4 and UDPv4 checksums inbound.  Only
   2602 	 * on i82543 and later.
   2603 	 */
   2604 	if (sc->sc_type >= WM_T_82543) {
   2605 		ifp->if_capabilities |=
   2606 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2607 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2608 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2609 		    IFCAP_CSUM_TCPv6_Tx |
   2610 		    IFCAP_CSUM_UDPv6_Tx;
   2611 	}
   2612 
   2613 	/*
   2614 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2615 	 *
   2616 	 *	82541GI (8086:1076) ... no
   2617 	 *	82572EI (8086:10b9) ... yes
   2618 	 */
   2619 	if (sc->sc_type >= WM_T_82571) {
   2620 		ifp->if_capabilities |=
   2621 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2622 	}
   2623 
   2624 	/*
    2625 	 * If we're an i82544 or greater (except i82547), we can do
   2626 	 * TCP segmentation offload.
   2627 	 */
   2628 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2629 		ifp->if_capabilities |= IFCAP_TSOv4;
   2630 	}
   2631 
   2632 	if (sc->sc_type >= WM_T_82571) {
   2633 		ifp->if_capabilities |= IFCAP_TSOv6;
   2634 	}
   2635 
   2636 #ifdef WM_MPSAFE
   2637 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2638 #else
   2639 	sc->sc_core_lock = NULL;
   2640 #endif
   2641 
   2642 	/* Attach the interface. */
   2643 	if_initialize(ifp);
   2644 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2645 	if_deferred_start_init(ifp, deferred_start_func);
   2646 	ether_ifattach(ifp, enaddr);
   2647 	if_register(ifp);
   2648 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2649 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2650 			  RND_FLAG_DEFAULT);
   2651 
   2652 #ifdef WM_EVENT_COUNTERS
   2653 	/* Attach event counters. */
   2654 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2655 	    NULL, xname, "linkintr");
   2656 
   2657 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2658 	    NULL, xname, "tx_xoff");
   2659 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2660 	    NULL, xname, "tx_xon");
   2661 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2662 	    NULL, xname, "rx_xoff");
   2663 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2664 	    NULL, xname, "rx_xon");
   2665 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2666 	    NULL, xname, "rx_macctl");
   2667 #endif /* WM_EVENT_COUNTERS */
   2668 
   2669 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2670 		pmf_class_network_register(self, ifp);
   2671 	else
   2672 		aprint_error_dev(self, "couldn't establish power handler\n");
   2673 
   2674 	sc->sc_flags |= WM_F_ATTACHED;
   2675  out:
   2676 	return;
   2677 }
   2678 
   2679 /* The detach function (ca_detach) */
   2680 static int
   2681 wm_detach(device_t self, int flags __unused)
   2682 {
   2683 	struct wm_softc *sc = device_private(self);
   2684 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2685 	int i;
   2686 
   2687 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2688 		return 0;
   2689 
   2690 	/* Stop the interface. Callouts are stopped in it. */
   2691 	wm_stop(ifp, 1);
   2692 
   2693 	pmf_device_deregister(self);
   2694 
   2695 	/* Tell the firmware about the release */
   2696 	WM_CORE_LOCK(sc);
   2697 	wm_release_manageability(sc);
   2698 	wm_release_hw_control(sc);
   2699 	wm_enable_wakeup(sc);
   2700 	WM_CORE_UNLOCK(sc);
   2701 
   2702 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2703 
   2704 	/* Delete all remaining media. */
   2705 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2706 
   2707 	ether_ifdetach(ifp);
   2708 	if_detach(ifp);
   2709 	if_percpuq_destroy(sc->sc_ipq);
   2710 
   2711 	/* Unload RX dmamaps and free mbufs */
   2712 	for (i = 0; i < sc->sc_nqueues; i++) {
   2713 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2714 		mutex_enter(rxq->rxq_lock);
   2715 		wm_rxdrain(rxq);
   2716 		mutex_exit(rxq->rxq_lock);
   2717 	}
   2718 	/* Must unlock here */
   2719 
   2720 	/* Disestablish the interrupt handler */
   2721 	for (i = 0; i < sc->sc_nintrs; i++) {
   2722 		if (sc->sc_ihs[i] != NULL) {
   2723 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2724 			sc->sc_ihs[i] = NULL;
   2725 		}
   2726 	}
   2727 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2728 
   2729 	wm_free_txrx_queues(sc);
   2730 
   2731 	/* Unmap the registers */
   2732 	if (sc->sc_ss) {
   2733 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2734 		sc->sc_ss = 0;
   2735 	}
   2736 	if (sc->sc_ios) {
   2737 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2738 		sc->sc_ios = 0;
   2739 	}
   2740 	if (sc->sc_flashs) {
   2741 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2742 		sc->sc_flashs = 0;
   2743 	}
   2744 
   2745 	if (sc->sc_core_lock)
   2746 		mutex_obj_free(sc->sc_core_lock);
   2747 	if (sc->sc_ich_phymtx)
   2748 		mutex_obj_free(sc->sc_ich_phymtx);
   2749 	if (sc->sc_ich_nvmmtx)
   2750 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2751 
   2752 	return 0;
   2753 }
   2754 
   2755 static bool
   2756 wm_suspend(device_t self, const pmf_qual_t *qual)
   2757 {
   2758 	struct wm_softc *sc = device_private(self);
   2759 
   2760 	wm_release_manageability(sc);
   2761 	wm_release_hw_control(sc);
   2762 	wm_enable_wakeup(sc);
   2763 
   2764 	return true;
   2765 }
   2766 
   2767 static bool
   2768 wm_resume(device_t self, const pmf_qual_t *qual)
   2769 {
   2770 	struct wm_softc *sc = device_private(self);
   2771 
   2772 	wm_init_manageability(sc);
   2773 
   2774 	return true;
   2775 }
   2776 
   2777 /*
   2778  * wm_watchdog:		[ifnet interface function]
   2779  *
   2780  *	Watchdog timer handler.
   2781  */
   2782 static void
   2783 wm_watchdog(struct ifnet *ifp)
   2784 {
   2785 	int qid;
   2786 	struct wm_softc *sc = ifp->if_softc;
   2787 
   2788 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2789 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2790 
   2791 		wm_watchdog_txq(ifp, txq);
   2792 	}
   2793 
   2794 	/* Reset the interface. */
   2795 	(void) wm_init(ifp);
   2796 
   2797 	/*
    2798 	 * There is still some upper-layer processing that calls
    2799 	 * ifp->if_start(), e.g. ALTQ.
   2800 	 */
   2801 	/* Try to get more packets going. */
   2802 	ifp->if_start(ifp);
   2803 }
   2804 
   2805 static void
   2806 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2807 {
   2808 	struct wm_softc *sc = ifp->if_softc;
   2809 
   2810 	/*
   2811 	 * Since we're using delayed interrupts, sweep up
   2812 	 * before we report an error.
   2813 	 */
   2814 	mutex_enter(txq->txq_lock);
   2815 	wm_txeof(sc, txq);
   2816 	mutex_exit(txq->txq_lock);
   2817 
   2818 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2819 #ifdef WM_DEBUG
   2820 		int i, j;
   2821 		struct wm_txsoft *txs;
   2822 #endif
   2823 		log(LOG_ERR,
   2824 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2825 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2826 		    txq->txq_next);
   2827 		ifp->if_oerrors++;
   2828 #ifdef WM_DEBUG
    2829 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2830 		    i = WM_NEXTTXS(txq, i)) {
    2831 			txs = &txq->txq_soft[i];
    2832 			printf("txs %d tx %d -> %d\n",
    2833 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2834 			for (j = txs->txs_firstdesc; ;
    2835 			    j = WM_NEXTTX(txq, j)) {
    2836 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2837 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2838 				printf("\t %#08x%08x\n",
    2839 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2840 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2841 				if (j == txs->txs_lastdesc)
    2842 					break;
    2843 			}
    2844 		}
   2845 #endif
   2846 	}
   2847 }
   2848 
   2849 /*
   2850  * wm_tick:
   2851  *
    2852  *	One-second timer, used to check link status, sweep up
   2853  *	completed transmit jobs, etc.
   2854  */
   2855 static void
   2856 wm_tick(void *arg)
   2857 {
   2858 	struct wm_softc *sc = arg;
   2859 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2860 #ifndef WM_MPSAFE
   2861 	int s = splnet();
   2862 #endif
   2863 
   2864 	WM_CORE_LOCK(sc);
   2865 
   2866 	if (sc->sc_core_stopping)
   2867 		goto out;
   2868 
   2869 	if (sc->sc_type >= WM_T_82542_2_1) {
   2870 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2871 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2872 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2873 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2874 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2875 	}
   2876 
   2877 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2878 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2879 	    + CSR_READ(sc, WMREG_CRCERRS)
   2880 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2881 	    + CSR_READ(sc, WMREG_SYMERRC)
   2882 	    + CSR_READ(sc, WMREG_RXERRC)
   2883 	    + CSR_READ(sc, WMREG_SEC)
   2884 	    + CSR_READ(sc, WMREG_CEXTERR)
   2885 	    + CSR_READ(sc, WMREG_RLEC);
    2886 	/*
    2887 	 * WMREG_RNBC is incremented when there are no available buffers
    2888 	 * in host memory. It does not count dropped packets, because the
    2889 	 * Ethernet controller can still receive packets in such a case
    2890 	 * if there is space in the PHY's FIFO.
    2891 	 *
    2892 	 * If you want to track WMREG_RNBC, count it with a dedicated
    2893 	 * EVCNT instead of adding it to if_iqdrops.
    2894 	 */
   2895 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
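         	/*
         	 * A minimal sketch of such a dedicated counter, assuming
         	 * WM_EVENT_COUNTERS and a hypothetical sc_ev_rnbc field in
         	 * struct wm_softc (neither is wired up here):
         	 *
         	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
         	 *	    NULL, device_xname(sc->sc_dev), "rnbc");
         	 *
         	 * and then, in this function:
         	 *
         	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
         	 */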
   2896 
   2897 	if (sc->sc_flags & WM_F_HAS_MII)
   2898 		mii_tick(&sc->sc_mii);
   2899 	else if ((sc->sc_type >= WM_T_82575)
   2900 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2901 		wm_serdes_tick(sc);
   2902 	else
   2903 		wm_tbi_tick(sc);
   2904 
   2905 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2906 out:
   2907 	WM_CORE_UNLOCK(sc);
   2908 #ifndef WM_MPSAFE
   2909 	splx(s);
   2910 #endif
   2911 }
   2912 
   2913 static int
   2914 wm_ifflags_cb(struct ethercom *ec)
   2915 {
   2916 	struct ifnet *ifp = &ec->ec_if;
   2917 	struct wm_softc *sc = ifp->if_softc;
   2918 	int rc = 0;
   2919 
   2920 	WM_CORE_LOCK(sc);
   2921 
   2922 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2923 	sc->sc_if_flags = ifp->if_flags;
   2924 
   2925 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2926 		rc = ENETRESET;
   2927 		goto out;
   2928 	}
   2929 
   2930 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2931 		wm_set_filter(sc);
   2932 
   2933 	wm_set_vlan(sc);
   2934 
   2935 out:
   2936 	WM_CORE_UNLOCK(sc);
   2937 
   2938 	return rc;
   2939 }
   2940 
   2941 /*
   2942  * wm_ioctl:		[ifnet interface function]
   2943  *
   2944  *	Handle control requests from the operator.
   2945  */
   2946 static int
   2947 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2948 {
   2949 	struct wm_softc *sc = ifp->if_softc;
   2950 	struct ifreq *ifr = (struct ifreq *) data;
   2951 	struct ifaddr *ifa = (struct ifaddr *)data;
   2952 	struct sockaddr_dl *sdl;
   2953 	int s, error;
   2954 
   2955 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2956 		device_xname(sc->sc_dev), __func__));
   2957 
   2958 #ifndef WM_MPSAFE
   2959 	s = splnet();
   2960 #endif
   2961 	switch (cmd) {
   2962 	case SIOCSIFMEDIA:
   2963 	case SIOCGIFMEDIA:
   2964 		WM_CORE_LOCK(sc);
   2965 		/* Flow control requires full-duplex mode. */
   2966 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2967 		    (ifr->ifr_media & IFM_FDX) == 0)
   2968 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2969 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2970 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2971 				/* We can do both TXPAUSE and RXPAUSE. */
   2972 				ifr->ifr_media |=
   2973 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2974 			}
   2975 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2976 		}
   2977 		WM_CORE_UNLOCK(sc);
   2978 #ifdef WM_MPSAFE
   2979 		s = splnet();
   2980 #endif
   2981 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2982 #ifdef WM_MPSAFE
   2983 		splx(s);
   2984 #endif
   2985 		break;
   2986 	case SIOCINITIFADDR:
   2987 		WM_CORE_LOCK(sc);
   2988 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2989 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2990 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2991 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2992 			/* unicast address is first multicast entry */
   2993 			wm_set_filter(sc);
   2994 			error = 0;
   2995 			WM_CORE_UNLOCK(sc);
   2996 			break;
   2997 		}
   2998 		WM_CORE_UNLOCK(sc);
   2999 		/*FALLTHROUGH*/
   3000 	default:
   3001 #ifdef WM_MPSAFE
   3002 		s = splnet();
   3003 #endif
   3004 		/* It may call wm_start, so unlock here */
   3005 		error = ether_ioctl(ifp, cmd, data);
   3006 #ifdef WM_MPSAFE
   3007 		splx(s);
   3008 #endif
   3009 		if (error != ENETRESET)
   3010 			break;
   3011 
   3012 		error = 0;
   3013 
   3014 		if (cmd == SIOCSIFCAP) {
   3015 			error = (*ifp->if_init)(ifp);
   3016 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3017 			;
   3018 		else if (ifp->if_flags & IFF_RUNNING) {
   3019 			/*
   3020 			 * Multicast list has changed; set the hardware filter
   3021 			 * accordingly.
   3022 			 */
   3023 			WM_CORE_LOCK(sc);
   3024 			wm_set_filter(sc);
   3025 			WM_CORE_UNLOCK(sc);
   3026 		}
   3027 		break;
   3028 	}
   3029 
   3030 #ifndef WM_MPSAFE
   3031 	splx(s);
   3032 #endif
   3033 	return error;
   3034 }
   3035 
   3036 /* MAC address related */
   3037 
   3038 /*
    3039  * Get the offset of the MAC address and return it.
    3040  * If an error occurred, use offset 0.
   3041  */
   3042 static uint16_t
   3043 wm_check_alt_mac_addr(struct wm_softc *sc)
   3044 {
   3045 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3046 	uint16_t offset = NVM_OFF_MACADDR;
   3047 
   3048 	/* Try to read alternative MAC address pointer */
   3049 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3050 		return 0;
   3051 
    3052 	/* Check whether the pointer is valid. */
   3053 	if ((offset == 0x0000) || (offset == 0xffff))
   3054 		return 0;
   3055 
   3056 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3057 	/*
    3058 	 * Check whether the alternative MAC address is valid.
    3059 	 * Some cards have a non-0xffff pointer but don't actually
    3060 	 * use an alternative MAC address.
    3061 	 *
    3062 	 * Check that the group (multicast/broadcast) bit is not set.
    3063 	 */
   3064 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3065 		if (((myea[0] & 0xff) & 0x01) == 0)
   3066 			return offset; /* Found */
   3067 
   3068 	/* Not found */
   3069 	return 0;
   3070 }
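         /*
          * Worked example (assuming NVM_OFF_MACADDR_82571(func) expands to
          * three words per function, which is not visible in this excerpt):
          * if the word at NVM_OFF_ALT_MAC_ADDR_PTR reads 0x0040 and sc_funcid
          * is 1, the candidate MAC address starts at NVM word
          * 0x0040 + 3 = 0x0043 and spans three 16-bit words.
          */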
   3071 
   3072 static int
   3073 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3074 {
   3075 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3076 	uint16_t offset = NVM_OFF_MACADDR;
   3077 	int do_invert = 0;
   3078 
   3079 	switch (sc->sc_type) {
   3080 	case WM_T_82580:
   3081 	case WM_T_I350:
   3082 	case WM_T_I354:
   3083 		/* EEPROM Top Level Partitioning */
   3084 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3085 		break;
   3086 	case WM_T_82571:
   3087 	case WM_T_82575:
   3088 	case WM_T_82576:
   3089 	case WM_T_80003:
   3090 	case WM_T_I210:
   3091 	case WM_T_I211:
   3092 		offset = wm_check_alt_mac_addr(sc);
   3093 		if (offset == 0)
   3094 			if ((sc->sc_funcid & 0x01) == 1)
   3095 				do_invert = 1;
   3096 		break;
   3097 	default:
   3098 		if ((sc->sc_funcid & 0x01) == 1)
   3099 			do_invert = 1;
   3100 		break;
   3101 	}
   3102 
   3103 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3104 		goto bad;
   3105 
   3106 	enaddr[0] = myea[0] & 0xff;
   3107 	enaddr[1] = myea[0] >> 8;
   3108 	enaddr[2] = myea[1] & 0xff;
   3109 	enaddr[3] = myea[1] >> 8;
   3110 	enaddr[4] = myea[2] & 0xff;
   3111 	enaddr[5] = myea[2] >> 8;
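         	/*
         	 * The NVM stores the address as three little-endian 16-bit
         	 * words: for example, myea[] = { 0x1100, 0x3322, 0x5544 }
         	 * unpacks to the address 00:11:22:33:44:55.
         	 */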
   3112 
   3113 	/*
   3114 	 * Toggle the LSB of the MAC address on the second port
   3115 	 * of some dual port cards.
   3116 	 */
   3117 	if (do_invert != 0)
   3118 		enaddr[5] ^= 1;
   3119 
   3120 	return 0;
   3121 
   3122  bad:
   3123 	return -1;
   3124 }
   3125 
   3126 /*
   3127  * wm_set_ral:
   3128  *
    3129  *	Set an entry in the receive address list.
   3130  */
   3131 static void
   3132 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3133 {
   3134 	uint32_t ral_lo, ral_hi;
   3135 
   3136 	if (enaddr != NULL) {
   3137 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3138 		    (enaddr[3] << 24);
   3139 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3140 		ral_hi |= RAL_AV;
   3141 	} else {
   3142 		ral_lo = 0;
   3143 		ral_hi = 0;
   3144 	}
   3145 
   3146 	if (sc->sc_type >= WM_T_82544) {
   3147 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3148 		    ral_lo);
   3149 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3150 		    ral_hi);
   3151 	} else {
   3152 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3153 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3154 	}
   3155 }
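         /*
          * Packing example: for the address 00:11:22:33:44:55 the code above
          * yields ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV, i.e.
          * the address is stored little-endian with the Address Valid flag in
          * the high word.
          */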
   3156 
   3157 /*
   3158  * wm_mchash:
   3159  *
   3160  *	Compute the hash of the multicast address for the 4096-bit
   3161  *	multicast filter.
   3162  */
   3163 static uint32_t
   3164 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3165 {
   3166 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3167 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3168 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3169 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3170 	uint32_t hash;
   3171 
   3172 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3173 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3174 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3175 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3176 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3177 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3178 		return (hash & 0x3ff);
   3179 	}
   3180 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3181 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3182 
   3183 	return (hash & 0xfff);
   3184 }
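         /*
          * Worked example with sc_mchash_type == 0 (shifts 4/4) and the
          * multicast address 01:00:5e:00:00:01: enaddr[4] = 0x00 and
          * enaddr[5] = 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010.
          * wm_set_filter() below then uses hash >> 5 as the MTA register
          * index and hash & 0x1f as the bit within that register.
          */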
   3185 
   3186 /*
   3187  * wm_set_filter:
   3188  *
   3189  *	Set up the receive filter.
   3190  */
   3191 static void
   3192 wm_set_filter(struct wm_softc *sc)
   3193 {
   3194 	struct ethercom *ec = &sc->sc_ethercom;
   3195 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3196 	struct ether_multi *enm;
   3197 	struct ether_multistep step;
   3198 	bus_addr_t mta_reg;
   3199 	uint32_t hash, reg, bit;
   3200 	int i, size, ralmax;
   3201 
   3202 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3203 		device_xname(sc->sc_dev), __func__));
   3204 
   3205 	if (sc->sc_type >= WM_T_82544)
   3206 		mta_reg = WMREG_CORDOVA_MTA;
   3207 	else
   3208 		mta_reg = WMREG_MTA;
   3209 
   3210 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3211 
   3212 	if (ifp->if_flags & IFF_BROADCAST)
   3213 		sc->sc_rctl |= RCTL_BAM;
   3214 	if (ifp->if_flags & IFF_PROMISC) {
   3215 		sc->sc_rctl |= RCTL_UPE;
   3216 		goto allmulti;
   3217 	}
   3218 
   3219 	/*
   3220 	 * Set the station address in the first RAL slot, and
   3221 	 * clear the remaining slots.
   3222 	 */
   3223 	if (sc->sc_type == WM_T_ICH8)
    3224 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3225 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3226 	    || (sc->sc_type == WM_T_PCH))
   3227 		size = WM_RAL_TABSIZE_ICH8;
   3228 	else if (sc->sc_type == WM_T_PCH2)
   3229 		size = WM_RAL_TABSIZE_PCH2;
   3230 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3231 		size = WM_RAL_TABSIZE_PCH_LPT;
   3232 	else if (sc->sc_type == WM_T_82575)
   3233 		size = WM_RAL_TABSIZE_82575;
   3234 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3235 		size = WM_RAL_TABSIZE_82576;
   3236 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3237 		size = WM_RAL_TABSIZE_I350;
   3238 	else
   3239 		size = WM_RAL_TABSIZE;
   3240 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3241 
   3242 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3243 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3244 		switch (i) {
   3245 		case 0:
   3246 			/* We can use all entries */
   3247 			ralmax = size;
   3248 			break;
   3249 		case 1:
   3250 			/* Only RAR[0] */
   3251 			ralmax = 1;
   3252 			break;
   3253 		default:
   3254 			/* available SHRA + RAR[0] */
   3255 			ralmax = i + 1;
   3256 		}
   3257 	} else
   3258 		ralmax = size;
   3259 	for (i = 1; i < size; i++) {
   3260 		if (i < ralmax)
   3261 			wm_set_ral(sc, NULL, i);
   3262 	}
   3263 
   3264 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3265 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3266 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3267 	    || (sc->sc_type == WM_T_PCH_SPT))
   3268 		size = WM_ICH8_MC_TABSIZE;
   3269 	else
   3270 		size = WM_MC_TABSIZE;
   3271 	/* Clear out the multicast table. */
   3272 	for (i = 0; i < size; i++)
   3273 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3274 
   3275 	ETHER_LOCK(ec);
   3276 	ETHER_FIRST_MULTI(step, ec, enm);
   3277 	while (enm != NULL) {
   3278 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3279 			ETHER_UNLOCK(ec);
   3280 			/*
   3281 			 * We must listen to a range of multicast addresses.
   3282 			 * For now, just accept all multicasts, rather than
   3283 			 * trying to set only those filter bits needed to match
   3284 			 * the range.  (At this time, the only use of address
   3285 			 * ranges is for IP multicast routing, for which the
   3286 			 * range is big enough to require all bits set.)
   3287 			 */
   3288 			goto allmulti;
   3289 		}
   3290 
   3291 		hash = wm_mchash(sc, enm->enm_addrlo);
   3292 
   3293 		reg = (hash >> 5);
   3294 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3295 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3296 		    || (sc->sc_type == WM_T_PCH2)
   3297 		    || (sc->sc_type == WM_T_PCH_LPT)
   3298 		    || (sc->sc_type == WM_T_PCH_SPT))
   3299 			reg &= 0x1f;
   3300 		else
   3301 			reg &= 0x7f;
   3302 		bit = hash & 0x1f;
   3303 
   3304 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3305 		hash |= 1U << bit;
   3306 
   3307 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3308 			/*
   3309 			 * 82544 Errata 9: Certain register cannot be written
   3310 			 * with particular alignments in PCI-X bus operation
   3311 			 * (FCAH, MTA and VFTA).
   3312 			 */
   3313 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3314 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3315 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3316 		} else
   3317 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3318 
   3319 		ETHER_NEXT_MULTI(step, enm);
   3320 	}
   3321 	ETHER_UNLOCK(ec);
   3322 
   3323 	ifp->if_flags &= ~IFF_ALLMULTI;
   3324 	goto setit;
   3325 
   3326  allmulti:
   3327 	ifp->if_flags |= IFF_ALLMULTI;
   3328 	sc->sc_rctl |= RCTL_MPE;
   3329 
   3330  setit:
   3331 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3332 }
   3333 
   3334 /* Reset and init related */
   3335 
   3336 static void
   3337 wm_set_vlan(struct wm_softc *sc)
   3338 {
   3339 
   3340 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3341 		device_xname(sc->sc_dev), __func__));
   3342 
   3343 	/* Deal with VLAN enables. */
   3344 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3345 		sc->sc_ctrl |= CTRL_VME;
   3346 	else
   3347 		sc->sc_ctrl &= ~CTRL_VME;
   3348 
   3349 	/* Write the control registers. */
   3350 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3351 }
   3352 
   3353 static void
   3354 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3355 {
   3356 	uint32_t gcr;
   3357 	pcireg_t ctrl2;
   3358 
   3359 	gcr = CSR_READ(sc, WMREG_GCR);
   3360 
   3361 	/* Only take action if timeout value is defaulted to 0 */
   3362 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3363 		goto out;
   3364 
   3365 	if ((gcr & GCR_CAP_VER2) == 0) {
   3366 		gcr |= GCR_CMPL_TMOUT_10MS;
   3367 		goto out;
   3368 	}
   3369 
   3370 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3371 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3372 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3373 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3374 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3375 
   3376 out:
   3377 	/* Disable completion timeout resend */
   3378 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3379 
   3380 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3381 }
   3382 
   3383 void
   3384 wm_get_auto_rd_done(struct wm_softc *sc)
   3385 {
   3386 	int i;
   3387 
   3388 	/* wait for eeprom to reload */
   3389 	switch (sc->sc_type) {
   3390 	case WM_T_82571:
   3391 	case WM_T_82572:
   3392 	case WM_T_82573:
   3393 	case WM_T_82574:
   3394 	case WM_T_82583:
   3395 	case WM_T_82575:
   3396 	case WM_T_82576:
   3397 	case WM_T_82580:
   3398 	case WM_T_I350:
   3399 	case WM_T_I354:
   3400 	case WM_T_I210:
   3401 	case WM_T_I211:
   3402 	case WM_T_80003:
   3403 	case WM_T_ICH8:
   3404 	case WM_T_ICH9:
   3405 		for (i = 0; i < 10; i++) {
   3406 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3407 				break;
   3408 			delay(1000);
   3409 		}
   3410 		if (i == 10) {
   3411 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3412 			    "complete\n", device_xname(sc->sc_dev));
   3413 		}
   3414 		break;
   3415 	default:
   3416 		break;
   3417 	}
   3418 }
   3419 
   3420 void
   3421 wm_lan_init_done(struct wm_softc *sc)
   3422 {
   3423 	uint32_t reg = 0;
   3424 	int i;
   3425 
   3426 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3427 		device_xname(sc->sc_dev), __func__));
   3428 
   3429 	/* Wait for eeprom to reload */
   3430 	switch (sc->sc_type) {
   3431 	case WM_T_ICH10:
   3432 	case WM_T_PCH:
   3433 	case WM_T_PCH2:
   3434 	case WM_T_PCH_LPT:
   3435 	case WM_T_PCH_SPT:
   3436 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3437 			reg = CSR_READ(sc, WMREG_STATUS);
   3438 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3439 				break;
   3440 			delay(100);
   3441 		}
   3442 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3443 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3444 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3445 		}
   3446 		break;
   3447 	default:
   3448 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3449 		    __func__);
   3450 		break;
   3451 	}
   3452 
   3453 	reg &= ~STATUS_LAN_INIT_DONE;
   3454 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3455 }
   3456 
   3457 void
   3458 wm_get_cfg_done(struct wm_softc *sc)
   3459 {
   3460 	int mask;
   3461 	uint32_t reg;
   3462 	int i;
   3463 
   3464 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3465 		device_xname(sc->sc_dev), __func__));
   3466 
   3467 	/* Wait for eeprom to reload */
   3468 	switch (sc->sc_type) {
   3469 	case WM_T_82542_2_0:
   3470 	case WM_T_82542_2_1:
   3471 		/* null */
   3472 		break;
   3473 	case WM_T_82543:
   3474 	case WM_T_82544:
   3475 	case WM_T_82540:
   3476 	case WM_T_82545:
   3477 	case WM_T_82545_3:
   3478 	case WM_T_82546:
   3479 	case WM_T_82546_3:
   3480 	case WM_T_82541:
   3481 	case WM_T_82541_2:
   3482 	case WM_T_82547:
   3483 	case WM_T_82547_2:
   3484 	case WM_T_82573:
   3485 	case WM_T_82574:
   3486 	case WM_T_82583:
   3487 		/* generic */
   3488 		delay(10*1000);
   3489 		break;
   3490 	case WM_T_80003:
   3491 	case WM_T_82571:
   3492 	case WM_T_82572:
   3493 	case WM_T_82575:
   3494 	case WM_T_82576:
   3495 	case WM_T_82580:
   3496 	case WM_T_I350:
   3497 	case WM_T_I354:
   3498 	case WM_T_I210:
   3499 	case WM_T_I211:
   3500 		if (sc->sc_type == WM_T_82571) {
   3501 			/* Only 82571 shares port 0 */
   3502 			mask = EEMNGCTL_CFGDONE_0;
   3503 		} else
   3504 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3505 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3506 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3507 				break;
   3508 			delay(1000);
   3509 		}
   3510 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3511 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3512 				device_xname(sc->sc_dev), __func__));
   3513 		}
   3514 		break;
   3515 	case WM_T_ICH8:
   3516 	case WM_T_ICH9:
   3517 	case WM_T_ICH10:
   3518 	case WM_T_PCH:
   3519 	case WM_T_PCH2:
   3520 	case WM_T_PCH_LPT:
   3521 	case WM_T_PCH_SPT:
   3522 		delay(10*1000);
   3523 		if (sc->sc_type >= WM_T_ICH10)
   3524 			wm_lan_init_done(sc);
   3525 		else
   3526 			wm_get_auto_rd_done(sc);
   3527 
   3528 		reg = CSR_READ(sc, WMREG_STATUS);
   3529 		if ((reg & STATUS_PHYRA) != 0)
   3530 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3531 		break;
   3532 	default:
   3533 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3534 		    __func__);
   3535 		break;
   3536 	}
   3537 }
   3538 
   3539 /* Init hardware bits */
   3540 void
   3541 wm_initialize_hardware_bits(struct wm_softc *sc)
   3542 {
   3543 	uint32_t tarc0, tarc1, reg;
   3544 
   3545 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3546 		device_xname(sc->sc_dev), __func__));
   3547 
   3548 	/* For 82571 variant, 80003 and ICHs */
   3549 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3550 	    || (sc->sc_type >= WM_T_80003)) {
   3551 
   3552 		/* Transmit Descriptor Control 0 */
   3553 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3554 		reg |= TXDCTL_COUNT_DESC;
   3555 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3556 
   3557 		/* Transmit Descriptor Control 1 */
   3558 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3559 		reg |= TXDCTL_COUNT_DESC;
   3560 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3561 
   3562 		/* TARC0 */
   3563 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3564 		switch (sc->sc_type) {
   3565 		case WM_T_82571:
   3566 		case WM_T_82572:
   3567 		case WM_T_82573:
   3568 		case WM_T_82574:
   3569 		case WM_T_82583:
   3570 		case WM_T_80003:
   3571 			/* Clear bits 30..27 */
   3572 			tarc0 &= ~__BITS(30, 27);
   3573 			break;
   3574 		default:
   3575 			break;
   3576 		}
   3577 
   3578 		switch (sc->sc_type) {
   3579 		case WM_T_82571:
   3580 		case WM_T_82572:
   3581 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3582 
   3583 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3584 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3585 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3586 			/* 8257[12] Errata No.7 */
    3587 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3588 
   3589 			/* TARC1 bit 28 */
   3590 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3591 				tarc1 &= ~__BIT(28);
   3592 			else
   3593 				tarc1 |= __BIT(28);
   3594 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3595 
   3596 			/*
   3597 			 * 8257[12] Errata No.13
    3598 			 * Disable Dynamic Clock Gating.
   3599 			 */
   3600 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3601 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3602 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3603 			break;
   3604 		case WM_T_82573:
   3605 		case WM_T_82574:
   3606 		case WM_T_82583:
   3607 			if ((sc->sc_type == WM_T_82574)
   3608 			    || (sc->sc_type == WM_T_82583))
   3609 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3610 
   3611 			/* Extended Device Control */
   3612 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3613 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3614 			reg |= __BIT(22);	/* Set bit 22 */
   3615 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3616 
   3617 			/* Device Control */
   3618 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3619 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3620 
   3621 			/* PCIe Control Register */
   3622 			/*
   3623 			 * 82573 Errata (unknown).
   3624 			 *
   3625 			 * 82574 Errata 25 and 82583 Errata 12
   3626 			 * "Dropped Rx Packets":
    3627 			 *   NVM Image Version 2.1.4 and newer do not have this bug.
   3628 			 */
   3629 			reg = CSR_READ(sc, WMREG_GCR);
   3630 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3631 			CSR_WRITE(sc, WMREG_GCR, reg);
   3632 
   3633 			if ((sc->sc_type == WM_T_82574)
   3634 			    || (sc->sc_type == WM_T_82583)) {
   3635 				/*
   3636 				 * Document says this bit must be set for
   3637 				 * proper operation.
   3638 				 */
   3639 				reg = CSR_READ(sc, WMREG_GCR);
   3640 				reg |= __BIT(22);
   3641 				CSR_WRITE(sc, WMREG_GCR, reg);
   3642 
    3643 				/*
    3644 				 * Apply a workaround for the hardware
    3645 				 * erratum documented in the errata docs.
    3646 				 * It fixes an issue where error-prone or
    3647 				 * unreliable PCIe completions occur,
    3648 				 * particularly with ASPM enabled.  Without
    3649 				 * the fix, the issue can cause Tx timeouts.
    3650 				 */
   3651 				reg = CSR_READ(sc, WMREG_GCR2);
   3652 				reg |= __BIT(0);
   3653 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3654 			}
   3655 			break;
   3656 		case WM_T_80003:
   3657 			/* TARC0 */
   3658 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3659 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3660 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3661 
   3662 			/* TARC1 bit 28 */
   3663 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3664 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3665 				tarc1 &= ~__BIT(28);
   3666 			else
   3667 				tarc1 |= __BIT(28);
   3668 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3669 			break;
   3670 		case WM_T_ICH8:
   3671 		case WM_T_ICH9:
   3672 		case WM_T_ICH10:
   3673 		case WM_T_PCH:
   3674 		case WM_T_PCH2:
   3675 		case WM_T_PCH_LPT:
   3676 		case WM_T_PCH_SPT:
   3677 			/* TARC0 */
   3678 			if ((sc->sc_type == WM_T_ICH8)
   3679 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3680 				/* Set TARC0 bits 29 and 28 */
   3681 				tarc0 |= __BITS(29, 28);
   3682 			}
   3683 			/* Set TARC0 bits 23,24,26,27 */
   3684 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3685 
   3686 			/* CTRL_EXT */
   3687 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3688 			reg |= __BIT(22);	/* Set bit 22 */
   3689 			/*
   3690 			 * Enable PHY low-power state when MAC is at D3
   3691 			 * w/o WoL
   3692 			 */
   3693 			if (sc->sc_type >= WM_T_PCH)
   3694 				reg |= CTRL_EXT_PHYPDEN;
   3695 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3696 
   3697 			/* TARC1 */
   3698 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3699 			/* bit 28 */
   3700 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3701 				tarc1 &= ~__BIT(28);
   3702 			else
   3703 				tarc1 |= __BIT(28);
   3704 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3705 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3706 
   3707 			/* Device Status */
   3708 			if (sc->sc_type == WM_T_ICH8) {
   3709 				reg = CSR_READ(sc, WMREG_STATUS);
   3710 				reg &= ~__BIT(31);
   3711 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3712 
   3713 			}
   3714 
   3715 			/* IOSFPC */
   3716 			if (sc->sc_type == WM_T_PCH_SPT) {
   3717 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3718 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3719 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3720 			}
   3721 			/*
    3722 			 * To work around a descriptor data corruption issue
    3723 			 * with NFSv2 UDP traffic, simply disable the NFS
    3724 			 * filtering capability.
   3725 			 */
   3726 			reg = CSR_READ(sc, WMREG_RFCTL);
   3727 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3728 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3729 			break;
   3730 		default:
   3731 			break;
   3732 		}
   3733 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3734 
   3735 		switch (sc->sc_type) {
   3736 		/*
   3737 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3738 		 * Avoid RSS Hash Value bug.
   3739 		 */
   3740 		case WM_T_82571:
   3741 		case WM_T_82572:
   3742 		case WM_T_82573:
   3743 		case WM_T_80003:
   3744 		case WM_T_ICH8:
   3745 			reg = CSR_READ(sc, WMREG_RFCTL);
   3746 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3747 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3748 			break;
   3749 		case WM_T_82574:
    3750 			/* Use extended Rx descriptors. */
   3751 			reg = CSR_READ(sc, WMREG_RFCTL);
   3752 			reg |= WMREG_RFCTL_EXSTEN;
   3753 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3754 			break;
   3755 		default:
   3756 			break;
   3757 		}
   3758 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3759 		/*
   3760 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3761 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3762 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3763 		 * Correctly by the Device"
   3764 		 *
   3765 		 * I354(C2000) Errata AVR53:
   3766 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3767 		 * Hang"
   3768 		 */
   3769 		reg = CSR_READ(sc, WMREG_RFCTL);
   3770 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3771 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3772 	}
   3773 }
   3774 
   3775 static uint32_t
   3776 wm_rxpbs_adjust_82580(uint32_t val)
   3777 {
   3778 	uint32_t rv = 0;
   3779 
   3780 	if (val < __arraycount(wm_82580_rxpbs_table))
   3781 		rv = wm_82580_rxpbs_table[val];
   3782 
   3783 	return rv;
   3784 }
   3785 
   3786 /*
   3787  * wm_reset_phy:
   3788  *
   3789  *	generic PHY reset function.
   3790  *	Same as e1000_phy_hw_reset_generic()
   3791  */
   3792 static void
   3793 wm_reset_phy(struct wm_softc *sc)
   3794 {
   3795 	uint32_t reg;
   3796 
   3797 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3798 		device_xname(sc->sc_dev), __func__));
   3799 	if (wm_phy_resetisblocked(sc))
   3800 		return;
   3801 
   3802 	sc->phy.acquire(sc);
   3803 
   3804 	reg = CSR_READ(sc, WMREG_CTRL);
   3805 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3806 	CSR_WRITE_FLUSH(sc);
   3807 
   3808 	delay(sc->phy.reset_delay_us);
   3809 
   3810 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3811 	CSR_WRITE_FLUSH(sc);
   3812 
   3813 	delay(150);
   3814 
   3815 	sc->phy.release(sc);
   3816 
   3817 	wm_get_cfg_done(sc);
   3818 }
   3819 
   3820 static void
   3821 wm_flush_desc_rings(struct wm_softc *sc)
   3822 {
   3823 	pcireg_t preg;
   3824 	uint32_t reg;
   3825 	int nexttx;
   3826 
   3827 	/* First, disable MULR fix in FEXTNVM11 */
   3828 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3829 	reg |= FEXTNVM11_DIS_MULRFIX;
   3830 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3831 
   3832 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3833 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3834 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3835 		struct wm_txqueue *txq;
   3836 		wiseman_txdesc_t *txd;
   3837 
   3838 		/* TX */
   3839 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3840 		    device_xname(sc->sc_dev), preg, reg);
   3841 		reg = CSR_READ(sc, WMREG_TCTL);
   3842 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3843 
   3844 		txq = &sc->sc_queue[0].wmq_txq;
   3845 		nexttx = txq->txq_next;
   3846 		txd = &txq->txq_descs[nexttx];
   3847 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   3848 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS| 512);
   3849 		txd->wtx_fields.wtxu_status = 0;
   3850 		txd->wtx_fields.wtxu_options = 0;
   3851 		txd->wtx_fields.wtxu_vlan = 0;
   3852 
   3853 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3854 			BUS_SPACE_BARRIER_WRITE);
   3855 
   3856 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3857 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3858 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3859 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3860 		delay(250);
   3861 	}
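         	/*
         	 * The dummy 512-byte descriptor above simply gives the DMA
         	 * engine one transmit to complete so the hardware can drain
         	 * its internal descriptor cache before reset; this appears to
         	 * mirror e1000e's I219 (PCH_SPT) flush workaround, though the
         	 * origin is not stated here.
         	 */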
   3862 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3863 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3864 		uint32_t rctl;
   3865 
   3866 		/* RX */
   3867 		printf("%s: Need RX flush (reg = %08x)\n",
   3868 		    device_xname(sc->sc_dev), preg);
   3869 		rctl = CSR_READ(sc, WMREG_RCTL);
   3870 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3871 		CSR_WRITE_FLUSH(sc);
   3872 		delay(150);
   3873 
   3874 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3875 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3876 		reg &= 0xffffc000;
   3877 		/*
   3878 		 * update thresholds: prefetch threshold to 31, host threshold
   3879 		 * to 1 and make sure the granularity is "descriptors" and not
   3880 		 * "cache lines"
   3881 		 */
   3882 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3883 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3884 
   3885 		/*
   3886 		 * momentarily enable the RX ring for the changes to take
   3887 		 * effect
   3888 		 */
   3889 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3890 		CSR_WRITE_FLUSH(sc);
   3891 		delay(150);
   3892 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3893 	}
   3894 }
   3895 
   3896 /*
   3897  * wm_reset:
   3898  *
   3899  *	Reset the i82542 chip.
   3900  */
   3901 static void
   3902 wm_reset(struct wm_softc *sc)
   3903 {
   3904 	int phy_reset = 0;
   3905 	int i, error = 0;
   3906 	uint32_t reg;
   3907 
   3908 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3909 		device_xname(sc->sc_dev), __func__));
   3910 	KASSERT(sc->sc_type != 0);
   3911 
   3912 	/*
   3913 	 * Allocate on-chip memory according to the MTU size.
   3914 	 * The Packet Buffer Allocation register must be written
   3915 	 * before the chip is reset.
   3916 	 */
   3917 	switch (sc->sc_type) {
   3918 	case WM_T_82547:
   3919 	case WM_T_82547_2:
   3920 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3921 		    PBA_22K : PBA_30K;
   3922 		for (i = 0; i < sc->sc_nqueues; i++) {
   3923 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3924 			txq->txq_fifo_head = 0;
   3925 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3926 			txq->txq_fifo_size =
   3927 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3928 			txq->txq_fifo_stall = 0;
   3929 		}
   3930 		break;
   3931 	case WM_T_82571:
   3932 	case WM_T_82572:
    3933 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3934 	case WM_T_80003:
   3935 		sc->sc_pba = PBA_32K;
   3936 		break;
   3937 	case WM_T_82573:
   3938 		sc->sc_pba = PBA_12K;
   3939 		break;
   3940 	case WM_T_82574:
   3941 	case WM_T_82583:
   3942 		sc->sc_pba = PBA_20K;
   3943 		break;
   3944 	case WM_T_82576:
   3945 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3946 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3947 		break;
   3948 	case WM_T_82580:
   3949 	case WM_T_I350:
   3950 	case WM_T_I354:
   3951 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3952 		break;
   3953 	case WM_T_I210:
   3954 	case WM_T_I211:
   3955 		sc->sc_pba = PBA_34K;
   3956 		break;
   3957 	case WM_T_ICH8:
   3958 		/* Workaround for a bit corruption issue in FIFO memory */
   3959 		sc->sc_pba = PBA_8K;
   3960 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3961 		break;
   3962 	case WM_T_ICH9:
   3963 	case WM_T_ICH10:
   3964 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3965 		    PBA_14K : PBA_10K;
   3966 		break;
   3967 	case WM_T_PCH:
   3968 	case WM_T_PCH2:
   3969 	case WM_T_PCH_LPT:
   3970 	case WM_T_PCH_SPT:
   3971 		sc->sc_pba = PBA_26K;
   3972 		break;
   3973 	default:
   3974 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3975 		    PBA_40K : PBA_48K;
   3976 		break;
   3977 	}
   3978 	/*
   3979 	 * Only old or non-multiqueue devices have the PBA register
   3980 	 * XXX Need special handling for 82575.
   3981 	 */
   3982 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3983 	    || (sc->sc_type == WM_T_82575))
   3984 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3985 
   3986 	/* Prevent the PCI-E bus from sticking */
   3987 	if (sc->sc_flags & WM_F_PCIE) {
   3988 		int timeout = 800;
   3989 
   3990 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3991 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3992 
   3993 		while (timeout--) {
   3994 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3995 			    == 0)
   3996 				break;
   3997 			delay(100);
   3998 		}
   3999 	}
   4000 
   4001 	/* Set the completion timeout for interface */
   4002 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4003 	    || (sc->sc_type == WM_T_82580)
   4004 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4005 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4006 		wm_set_pcie_completion_timeout(sc);
   4007 
   4008 	/* Clear interrupt */
   4009 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4010 	if (sc->sc_nintrs > 1) {
   4011 		if (sc->sc_type != WM_T_82574) {
   4012 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4013 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4014 		} else {
   4015 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4016 		}
   4017 	}
   4018 
   4019 	/* Stop the transmit and receive processes. */
   4020 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4021 	sc->sc_rctl &= ~RCTL_EN;
   4022 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4023 	CSR_WRITE_FLUSH(sc);
   4024 
   4025 	/* XXX set_tbi_sbp_82543() */
   4026 
   4027 	delay(10*1000);
   4028 
   4029 	/* Must acquire the MDIO ownership before MAC reset */
   4030 	switch (sc->sc_type) {
   4031 	case WM_T_82573:
   4032 	case WM_T_82574:
   4033 	case WM_T_82583:
   4034 		error = wm_get_hw_semaphore_82573(sc);
   4035 		break;
   4036 	default:
   4037 		break;
   4038 	}
   4039 
   4040 	/*
   4041 	 * 82541 Errata 29? & 82547 Errata 28?
   4042 	 * See also the description about PHY_RST bit in CTRL register
   4043 	 * in 8254x_GBe_SDM.pdf.
   4044 	 */
   4045 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4046 		CSR_WRITE(sc, WMREG_CTRL,
   4047 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4048 		CSR_WRITE_FLUSH(sc);
   4049 		delay(5000);
   4050 	}
   4051 
   4052 	switch (sc->sc_type) {
   4053 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4054 	case WM_T_82541:
   4055 	case WM_T_82541_2:
   4056 	case WM_T_82547:
   4057 	case WM_T_82547_2:
   4058 		/*
   4059 		 * On some chipsets, a reset through a memory-mapped write
   4060 		 * cycle can cause the chip to reset before completing the
    4061 		 * write cycle.  This causes major headaches that can be
   4062 		 * avoided by issuing the reset via indirect register writes
   4063 		 * through I/O space.
   4064 		 *
   4065 		 * So, if we successfully mapped the I/O BAR at attach time,
   4066 		 * use that.  Otherwise, try our luck with a memory-mapped
   4067 		 * reset.
   4068 		 */
   4069 		if (sc->sc_flags & WM_F_IOH_VALID)
   4070 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4071 		else
   4072 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4073 		break;
   4074 	case WM_T_82545_3:
   4075 	case WM_T_82546_3:
   4076 		/* Use the shadow control register on these chips. */
   4077 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4078 		break;
   4079 	case WM_T_80003:
   4080 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4081 		sc->phy.acquire(sc);
   4082 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4083 		sc->phy.release(sc);
   4084 		break;
   4085 	case WM_T_ICH8:
   4086 	case WM_T_ICH9:
   4087 	case WM_T_ICH10:
   4088 	case WM_T_PCH:
   4089 	case WM_T_PCH2:
   4090 	case WM_T_PCH_LPT:
   4091 	case WM_T_PCH_SPT:
   4092 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4093 		if (wm_phy_resetisblocked(sc) == false) {
   4094 			/*
   4095 			 * Gate automatic PHY configuration by hardware on
   4096 			 * non-managed 82579
   4097 			 */
   4098 			if ((sc->sc_type == WM_T_PCH2)
   4099 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4100 				== 0))
   4101 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4102 
   4103 			reg |= CTRL_PHY_RESET;
   4104 			phy_reset = 1;
   4105 		} else
   4106 			printf("XXX reset is blocked!!!\n");
   4107 		sc->phy.acquire(sc);
   4108 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4109 		/* Don't insert a completion barrier during reset */
   4110 		delay(20*1000);
   4111 		mutex_exit(sc->sc_ich_phymtx);
   4112 		break;
   4113 	case WM_T_82580:
   4114 	case WM_T_I350:
   4115 	case WM_T_I354:
   4116 	case WM_T_I210:
   4117 	case WM_T_I211:
   4118 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4119 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4120 			CSR_WRITE_FLUSH(sc);
   4121 		delay(5000);
   4122 		break;
   4123 	case WM_T_82542_2_0:
   4124 	case WM_T_82542_2_1:
   4125 	case WM_T_82543:
   4126 	case WM_T_82540:
   4127 	case WM_T_82545:
   4128 	case WM_T_82546:
   4129 	case WM_T_82571:
   4130 	case WM_T_82572:
   4131 	case WM_T_82573:
   4132 	case WM_T_82574:
   4133 	case WM_T_82575:
   4134 	case WM_T_82576:
   4135 	case WM_T_82583:
   4136 	default:
   4137 		/* Everything else can safely use the documented method. */
   4138 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4139 		break;
   4140 	}
   4141 
   4142 	/* Must release the MDIO ownership after MAC reset */
   4143 	switch (sc->sc_type) {
   4144 	case WM_T_82573:
   4145 	case WM_T_82574:
   4146 	case WM_T_82583:
   4147 		if (error == 0)
   4148 			wm_put_hw_semaphore_82573(sc);
   4149 		break;
   4150 	default:
   4151 		break;
   4152 	}
   4153 
   4154 	if (phy_reset != 0)
   4155 		wm_get_cfg_done(sc);
   4156 
   4157 	/* reload EEPROM */
   4158 	switch (sc->sc_type) {
   4159 	case WM_T_82542_2_0:
   4160 	case WM_T_82542_2_1:
   4161 	case WM_T_82543:
   4162 	case WM_T_82544:
   4163 		delay(10);
   4164 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4165 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4166 		CSR_WRITE_FLUSH(sc);
   4167 		delay(2000);
   4168 		break;
   4169 	case WM_T_82540:
   4170 	case WM_T_82545:
   4171 	case WM_T_82545_3:
   4172 	case WM_T_82546:
   4173 	case WM_T_82546_3:
   4174 		delay(5*1000);
   4175 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4176 		break;
   4177 	case WM_T_82541:
   4178 	case WM_T_82541_2:
   4179 	case WM_T_82547:
   4180 	case WM_T_82547_2:
   4181 		delay(20000);
   4182 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4183 		break;
   4184 	case WM_T_82571:
   4185 	case WM_T_82572:
   4186 	case WM_T_82573:
   4187 	case WM_T_82574:
   4188 	case WM_T_82583:
   4189 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4190 			delay(10);
   4191 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4192 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4193 			CSR_WRITE_FLUSH(sc);
   4194 		}
   4195 		/* check EECD_EE_AUTORD */
   4196 		wm_get_auto_rd_done(sc);
   4197 		/*
    4198 		 * PHY configuration from the NVM starts only after
    4199 		 * EECD_AUTO_RD is set.
   4200 		 */
   4201 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4202 		    || (sc->sc_type == WM_T_82583))
   4203 			delay(25*1000);
   4204 		break;
   4205 	case WM_T_82575:
   4206 	case WM_T_82576:
   4207 	case WM_T_82580:
   4208 	case WM_T_I350:
   4209 	case WM_T_I354:
   4210 	case WM_T_I210:
   4211 	case WM_T_I211:
   4212 	case WM_T_80003:
   4213 		/* check EECD_EE_AUTORD */
   4214 		wm_get_auto_rd_done(sc);
   4215 		break;
   4216 	case WM_T_ICH8:
   4217 	case WM_T_ICH9:
   4218 	case WM_T_ICH10:
   4219 	case WM_T_PCH:
   4220 	case WM_T_PCH2:
   4221 	case WM_T_PCH_LPT:
   4222 	case WM_T_PCH_SPT:
   4223 		break;
   4224 	default:
   4225 		panic("%s: unknown type\n", __func__);
   4226 	}
   4227 
   4228 	/* Check whether EEPROM is present or not */
   4229 	switch (sc->sc_type) {
   4230 	case WM_T_82575:
   4231 	case WM_T_82576:
   4232 	case WM_T_82580:
   4233 	case WM_T_I350:
   4234 	case WM_T_I354:
   4235 	case WM_T_ICH8:
   4236 	case WM_T_ICH9:
   4237 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4238 			/* Not found */
   4239 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4240 			if (sc->sc_type == WM_T_82575)
   4241 				wm_reset_init_script_82575(sc);
   4242 		}
   4243 		break;
   4244 	default:
   4245 		break;
   4246 	}
   4247 
   4248 	if ((sc->sc_type == WM_T_82580)
   4249 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4250 		/* clear global device reset status bit */
   4251 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4252 	}
   4253 
   4254 	/* Clear any pending interrupt events. */
   4255 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4256 	reg = CSR_READ(sc, WMREG_ICR);
   4257 	if (sc->sc_nintrs > 1) {
   4258 		if (sc->sc_type != WM_T_82574) {
   4259 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4260 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4261 		} else
   4262 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4263 	}
   4264 
   4265 	/* reload sc_ctrl */
   4266 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4267 
   4268 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4269 		wm_set_eee_i350(sc);
   4270 
    4271 	/* Clear the host wakeup bit after LCD (PHY) reset */
   4272 	if (sc->sc_type >= WM_T_PCH) {
   4273 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4274 		    BM_PORT_GEN_CFG);
   4275 		reg &= ~BM_WUC_HOST_WU_BIT;
   4276 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4277 		    BM_PORT_GEN_CFG, reg);
   4278 	}
   4279 
   4280 	/*
    4281 	 * For PCH, this write ensures that any noise is detected as a
    4282 	 * CRC error and dropped rather than presented to the DMA engine
    4283 	 * as a bad packet.
   4284 	 */
   4285 	if (sc->sc_type == WM_T_PCH)
   4286 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4287 
   4288 	if (sc->sc_type >= WM_T_82544)
   4289 		CSR_WRITE(sc, WMREG_WUC, 0);
   4290 
   4291 	wm_reset_mdicnfg_82580(sc);
   4292 
   4293 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4294 		wm_pll_workaround_i210(sc);
   4295 }
   4296 
   4297 /*
   4298  * wm_add_rxbuf:
   4299  *
    4300  *	Add a receive buffer to the indicated descriptor.
   4301  */
   4302 static int
   4303 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4304 {
   4305 	struct wm_softc *sc = rxq->rxq_sc;
   4306 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4307 	struct mbuf *m;
   4308 	int error;
   4309 
   4310 	KASSERT(mutex_owned(rxq->rxq_lock));
   4311 
   4312 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4313 	if (m == NULL)
   4314 		return ENOBUFS;
   4315 
   4316 	MCLGET(m, M_DONTWAIT);
   4317 	if ((m->m_flags & M_EXT) == 0) {
   4318 		m_freem(m);
   4319 		return ENOBUFS;
   4320 	}
   4321 
   4322 	if (rxs->rxs_mbuf != NULL)
   4323 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4324 
   4325 	rxs->rxs_mbuf = m;
   4326 
   4327 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4328 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4329 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4330 	if (error) {
   4331 		/* XXX XXX XXX */
   4332 		aprint_error_dev(sc->sc_dev,
   4333 		    "unable to load rx DMA map %d, error = %d\n",
   4334 		    idx, error);
   4335 		panic("wm_add_rxbuf");
   4336 	}
   4337 
   4338 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4339 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4340 
   4341 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4342 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4343 			wm_init_rxdesc(rxq, idx);
   4344 	} else
   4345 		wm_init_rxdesc(rxq, idx);
   4346 
   4347 	return 0;
   4348 }
   4349 
   4350 /*
   4351  * wm_rxdrain:
   4352  *
   4353  *	Drain the receive queue.
   4354  */
   4355 static void
   4356 wm_rxdrain(struct wm_rxqueue *rxq)
   4357 {
   4358 	struct wm_softc *sc = rxq->rxq_sc;
   4359 	struct wm_rxsoft *rxs;
   4360 	int i;
   4361 
   4362 	KASSERT(mutex_owned(rxq->rxq_lock));
   4363 
   4364 	for (i = 0; i < WM_NRXDESC; i++) {
   4365 		rxs = &rxq->rxq_soft[i];
   4366 		if (rxs->rxs_mbuf != NULL) {
   4367 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4368 			m_freem(rxs->rxs_mbuf);
   4369 			rxs->rxs_mbuf = NULL;
   4370 		}
   4371 	}
   4372 }
   4373 
   4374 
   4375 /*
   4376  * XXX copy from FreeBSD's sys/net/rss_config.c
   4377  */
   4378 /*
   4379  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4380  * effectiveness may be limited by algorithm choice and available entropy
   4381  * during the boot.
   4382  *
   4383  * XXXRW: And that we don't randomize it yet!
   4384  *
   4385  * This is the default Microsoft RSS specification key which is also
   4386  * the Chelsio T5 firmware default key.
   4387  */
   4388 #define RSS_KEYSIZE 40
   4389 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4390 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4391 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4392 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4393 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4394 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4395 };
   4396 
    4397 /*
    4398  * Caller must pass an array of size sizeof(wm_rss_key).
    4399  *
    4400  * XXX
    4401  * Since if_ixgbe may also use this function, it should not be
    4402  * an if_wm-specific function.
    4403  */
   4404 static void
   4405 wm_rss_getkey(uint8_t *key)
   4406 {
   4407 
   4408 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4409 }
   4410 
   4411 /*
   4412  * Setup registers for RSS.
   4413  *
    4414  * XXX VMDq is not supported yet.
   4415  */
   4416 static void
   4417 wm_init_rss(struct wm_softc *sc)
   4418 {
   4419 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4420 	int i;
   4421 
   4422 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4423 
   4424 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4425 		int qid, reta_ent;
   4426 
   4427 		qid  = i % sc->sc_nqueues;
   4428 		switch(sc->sc_type) {
   4429 		case WM_T_82574:
   4430 			reta_ent = __SHIFTIN(qid,
   4431 			    RETA_ENT_QINDEX_MASK_82574);
   4432 			break;
   4433 		case WM_T_82575:
   4434 			reta_ent = __SHIFTIN(qid,
   4435 			    RETA_ENT_QINDEX1_MASK_82575);
   4436 			break;
   4437 		default:
   4438 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4439 			break;
   4440 		}
   4441 
   4442 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4443 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4444 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4445 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4446 	}
   4447 
   4448 	wm_rss_getkey((uint8_t *)rss_key);
   4449 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4450 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4451 
   4452 	if (sc->sc_type == WM_T_82574)
   4453 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4454 	else
   4455 		mrqc = MRQC_ENABLE_RSS_MQ;
   4456 
   4457 	/*
    4458  * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   4459 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4460 	 */
   4461 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4462 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4463 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4464 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4465 
   4466 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4467 }
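         /*
          * Example: with sc_nqueues == 4, the loop above programs the RETA
          * entries with queue indices 0,1,2,3,0,1,..., so the low bits of a
          * received packet's Toeplitz hash select a queue in round-robin
          * fashion, while the MRQC field bits choose which IPv4/IPv6 header
          * fields feed that hash.
          */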
   4468 
    4469 /*
    4470  * Adjust the numbers of TX and RX queues the system actually uses.
    4471  *
    4472  * The numbers are limited by the following parameters:
    4473  *     - The number of hardware queues
    4474  *     - The number of MSI-X vectors (= "nvectors" argument)
    4475  *     - ncpu
    4476  */
   4477 static void
   4478 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4479 {
   4480 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4481 
   4482 	if (nvectors < 2) {
   4483 		sc->sc_nqueues = 1;
   4484 		return;
   4485 	}
   4486 
   4487 	switch(sc->sc_type) {
   4488 	case WM_T_82572:
   4489 		hw_ntxqueues = 2;
   4490 		hw_nrxqueues = 2;
   4491 		break;
   4492 	case WM_T_82574:
   4493 		hw_ntxqueues = 2;
   4494 		hw_nrxqueues = 2;
   4495 		break;
   4496 	case WM_T_82575:
   4497 		hw_ntxqueues = 4;
   4498 		hw_nrxqueues = 4;
   4499 		break;
   4500 	case WM_T_82576:
   4501 		hw_ntxqueues = 16;
   4502 		hw_nrxqueues = 16;
   4503 		break;
   4504 	case WM_T_82580:
   4505 	case WM_T_I350:
   4506 	case WM_T_I354:
   4507 		hw_ntxqueues = 8;
   4508 		hw_nrxqueues = 8;
   4509 		break;
   4510 	case WM_T_I210:
   4511 		hw_ntxqueues = 4;
   4512 		hw_nrxqueues = 4;
   4513 		break;
   4514 	case WM_T_I211:
   4515 		hw_ntxqueues = 2;
   4516 		hw_nrxqueues = 2;
   4517 		break;
    4518 		/*
    4519 		 * As the Ethernet controllers below do not support MSI-X,
    4520 		 * this driver does not use multiqueue for them:
    4521 		 *     - WM_T_80003
    4522 		 *     - WM_T_ICH8
    4523 		 *     - WM_T_ICH9
    4524 		 *     - WM_T_ICH10
    4525 		 *     - WM_T_PCH
    4526 		 *     - WM_T_PCH2
    4527 		 *     - WM_T_PCH_LPT
    4528 		 */
   4529 	default:
   4530 		hw_ntxqueues = 1;
   4531 		hw_nrxqueues = 1;
   4532 		break;
   4533 	}
   4534 
   4535 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4536 
    4537 	/*
    4538 	 * Since using more queues than MSI-X vectors cannot improve
    4539 	 * scaling, limit the number of queues actually used.
    4540 	 */
   4541 	if (nvectors < hw_nqueues + 1) {
   4542 		sc->sc_nqueues = nvectors - 1;
   4543 	} else {
   4544 		sc->sc_nqueues = hw_nqueues;
   4545 	}
   4546 
    4547 	/*
    4548 	 * Likewise, using more queues than CPUs cannot improve scaling,
    4549 	 * so also cap the number of queues at ncpu.
    4550 	 */
   4551 	if (ncpu < sc->sc_nqueues)
   4552 		sc->sc_nqueues = ncpu;
   4553 }
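         /*
          * Example: on an 82576 (16 hardware queue pairs) with nvectors == 5
          * and ncpu == 8, hw_nqueues is 16 but only nvectors - 1 = 4 vectors
          * remain for Tx/Rx (one is reserved for the link interrupt), so
          * sc_nqueues becomes 4.
          */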
   4554 
   4555 /*
   4556  * Both single interrupt MSI and INTx can use this function.
   4557  */
   4558 static int
   4559 wm_setup_legacy(struct wm_softc *sc)
   4560 {
   4561 	pci_chipset_tag_t pc = sc->sc_pc;
   4562 	const char *intrstr = NULL;
   4563 	char intrbuf[PCI_INTRSTR_LEN];
   4564 	int error;
   4565 
   4566 	error = wm_alloc_txrx_queues(sc);
   4567 	if (error) {
   4568 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4569 		    error);
   4570 		return ENOMEM;
   4571 	}
   4572 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4573 	    sizeof(intrbuf));
   4574 #ifdef WM_MPSAFE
   4575 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4576 #endif
   4577 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4578 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4579 	if (sc->sc_ihs[0] == NULL) {
   4580 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4581 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4582 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4583 		return ENOMEM;
   4584 	}
   4585 
   4586 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4587 	sc->sc_nintrs = 1;
   4588 	return 0;
   4589 }
   4590 
   4591 static int
   4592 wm_setup_msix(struct wm_softc *sc)
   4593 {
   4594 	void *vih;
   4595 	kcpuset_t *affinity;
   4596 	int qidx, error, intr_idx, txrx_established;
   4597 	pci_chipset_tag_t pc = sc->sc_pc;
   4598 	const char *intrstr = NULL;
   4599 	char intrbuf[PCI_INTRSTR_LEN];
   4600 	char intr_xname[INTRDEVNAMEBUF];
   4601 
   4602 	if (sc->sc_nqueues < ncpu) {
   4603 		/*
    4604 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4605 		 * interrupts starts at CPU#1.
   4606 		 */
   4607 		sc->sc_affinity_offset = 1;
   4608 	} else {
   4609 		/*
    4610 		 * In this case, this device uses all CPUs, so we match each
    4611 		 * affinity cpu_index to its MSI-X vector number for clarity.
   4612 		 */
   4613 		sc->sc_affinity_offset = 0;
   4614 	}
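         	/*
         	 * e.g. with sc_affinity_offset == 1 on a 4-CPU machine, the
         	 * round-robin assignment below pins the queue vectors
         	 * 0,1,2,3 to CPUs 1,2,3,0 respectively.
         	 */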
   4615 
   4616 	error = wm_alloc_txrx_queues(sc);
   4617 	if (error) {
   4618 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4619 		    error);
   4620 		return ENOMEM;
   4621 	}
   4622 
   4623 	kcpuset_create(&affinity, false);
   4624 	intr_idx = 0;
   4625 
   4626 	/*
   4627 	 * TX and RX
   4628 	 */
   4629 	txrx_established = 0;
   4630 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4631 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4632 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4633 
   4634 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4635 		    sizeof(intrbuf));
   4636 #ifdef WM_MPSAFE
   4637 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4638 		    PCI_INTR_MPSAFE, true);
   4639 #endif
   4640 		memset(intr_xname, 0, sizeof(intr_xname));
   4641 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4642 		    device_xname(sc->sc_dev), qidx);
   4643 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4644 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4645 		if (vih == NULL) {
   4646 			aprint_error_dev(sc->sc_dev,
   4647 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4648 			    intrstr ? " at " : "",
   4649 			    intrstr ? intrstr : "");
   4650 
   4651 			goto fail;
   4652 		}
   4653 		kcpuset_zero(affinity);
   4654 		/* Round-robin affinity */
   4655 		kcpuset_set(affinity, affinity_to);
   4656 		error = interrupt_distribute(vih, affinity, NULL);
   4657 		if (error == 0) {
   4658 			aprint_normal_dev(sc->sc_dev,
   4659 			    "for TX and RX interrupting at %s affinity to %u\n",
   4660 			    intrstr, affinity_to);
   4661 		} else {
   4662 			aprint_normal_dev(sc->sc_dev,
   4663 			    "for TX and RX interrupting at %s\n", intrstr);
   4664 		}
   4665 		sc->sc_ihs[intr_idx] = vih;
    4666 		wmq->wmq_id = qidx;
   4667 		wmq->wmq_intr_idx = intr_idx;
   4668 
   4669 		txrx_established++;
   4670 		intr_idx++;
   4671 	}
   4672 
   4673 	/*
   4674 	 * LINK
   4675 	 */
   4676 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4677 	    sizeof(intrbuf));
   4678 #ifdef WM_MPSAFE
   4679 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4680 #endif
   4681 	memset(intr_xname, 0, sizeof(intr_xname));
   4682 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4683 	    device_xname(sc->sc_dev));
   4684 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4685 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4686 	if (vih == NULL) {
   4687 		aprint_error_dev(sc->sc_dev,
   4688 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4689 		    intrstr ? " at " : "",
   4690 		    intrstr ? intrstr : "");
   4691 
   4692 		goto fail;
   4693 	}
    4694 	/* Keep the default affinity for the LINK interrupt */
   4695 	aprint_normal_dev(sc->sc_dev,
   4696 	    "for LINK interrupting at %s\n", intrstr);
   4697 	sc->sc_ihs[intr_idx] = vih;
   4698 	sc->sc_link_intr_idx = intr_idx;
   4699 
   4700 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4701 	kcpuset_destroy(affinity);
   4702 	return 0;
   4703 
   4704  fail:
   4705 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4706 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4707 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4708 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4709 	}
   4710 
   4711 	kcpuset_destroy(affinity);
   4712 	return ENOMEM;
   4713 }
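         
         /*
          * The resulting MSI-X layout of wm_setup_msix(): vectors
          * 0 .. sc_nqueues - 1 service the Tx/Rx queue pairs and vector
          * sc_nqueues services link status changes, hence
          * sc_nintrs = sc_nqueues + 1.
          */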
   4714 
   4715 static void
   4716 wm_turnon(struct wm_softc *sc)
   4717 {
   4718 	int i;
   4719 
   4720 	KASSERT(WM_CORE_LOCKED(sc));
   4721 
    4722 	for (i = 0; i < sc->sc_nqueues; i++) {
   4723 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4724 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4725 
   4726 		mutex_enter(txq->txq_lock);
   4727 		txq->txq_stopping = false;
   4728 		mutex_exit(txq->txq_lock);
   4729 
   4730 		mutex_enter(rxq->rxq_lock);
   4731 		rxq->rxq_stopping = false;
   4732 		mutex_exit(rxq->rxq_lock);
   4733 	}
   4734 
   4735 	sc->sc_core_stopping = false;
   4736 }
   4737 
   4738 static void
   4739 wm_turnoff(struct wm_softc *sc)
   4740 {
   4741 	int i;
   4742 
   4743 	KASSERT(WM_CORE_LOCKED(sc));
   4744 
   4745 	sc->sc_core_stopping = true;
   4746 
    4747 	for (i = 0; i < sc->sc_nqueues; i++) {
   4748 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4749 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4750 
   4751 		mutex_enter(rxq->rxq_lock);
   4752 		rxq->rxq_stopping = true;
   4753 		mutex_exit(rxq->rxq_lock);
   4754 
   4755 		mutex_enter(txq->txq_lock);
   4756 		txq->txq_stopping = true;
   4757 		mutex_exit(txq->txq_lock);
   4758 	}
   4759 }
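         
         /*
          * Note the ordering above: wm_turnoff() raises sc_core_stopping
          * before the per-queue stopping flags, while wm_turnon() clears
          * the per-queue flags first and sc_core_stopping last, so the
          * core flag always brackets the per-queue state changes.
          */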
   4760 
   4761 /*
   4762  * wm_init:		[ifnet interface function]
   4763  *
   4764  *	Initialize the interface.
   4765  */
   4766 static int
   4767 wm_init(struct ifnet *ifp)
   4768 {
   4769 	struct wm_softc *sc = ifp->if_softc;
   4770 	int ret;
   4771 
   4772 	WM_CORE_LOCK(sc);
   4773 	ret = wm_init_locked(ifp);
   4774 	WM_CORE_UNLOCK(sc);
   4775 
   4776 	return ret;
   4777 }
   4778 
   4779 static int
   4780 wm_init_locked(struct ifnet *ifp)
   4781 {
   4782 	struct wm_softc *sc = ifp->if_softc;
   4783 	int i, j, trynum, error = 0;
   4784 	uint32_t reg;
   4785 
   4786 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4787 		device_xname(sc->sc_dev), __func__));
   4788 	KASSERT(WM_CORE_LOCKED(sc));
   4789 
   4790 	/*
    4791 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4792 	 * There is a small but measurable benefit to avoiding the adjustment
   4793 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4794 	 * on such platforms.  One possibility is that the DMA itself is
   4795 	 * slightly more efficient if the front of the entire packet (instead
   4796 	 * of the front of the headers) is aligned.
   4797 	 *
   4798 	 * Note we must always set align_tweak to 0 if we are using
   4799 	 * jumbo frames.
   4800 	 */
   4801 #ifdef __NO_STRICT_ALIGNMENT
   4802 	sc->sc_align_tweak = 0;
   4803 #else
   4804 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4805 		sc->sc_align_tweak = 0;
   4806 	else
   4807 		sc->sc_align_tweak = 2;
   4808 #endif /* __NO_STRICT_ALIGNMENT */
   4809 
   4810 	/* Cancel any pending I/O. */
   4811 	wm_stop_locked(ifp, 0);
   4812 
    4813 	/* Update statistics before reset */
   4814 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4815 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4816 
   4817 	/* PCH_SPT hardware workaround */
   4818 	if (sc->sc_type == WM_T_PCH_SPT)
   4819 		wm_flush_desc_rings(sc);
   4820 
   4821 	/* Reset the chip to a known state. */
   4822 	wm_reset(sc);
   4823 
   4824 	/* AMT based hardware can now take control from firmware */
   4825 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4826 		wm_get_hw_control(sc);
   4827 
   4828 	/* Init hardware bits */
   4829 	wm_initialize_hardware_bits(sc);
   4830 
   4831 	/* Reset the PHY. */
   4832 	if (sc->sc_flags & WM_F_HAS_MII)
   4833 		wm_gmii_reset(sc);
   4834 
   4835 	/* Calculate (E)ITR value */
   4836 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4837 		sc->sc_itr = 450;	/* For EITR */
   4838 	} else if (sc->sc_type >= WM_T_82543) {
   4839 		/*
   4840 		 * Set up the interrupt throttling register (units of 256ns)
   4841 		 * Note that a footnote in Intel's documentation says this
   4842 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4843 		 * or 10Mbit mode.  Empirically, it appears to be the case
    4844 		 * or 10Mbit mode.  Empirically, this appears to be true for
    4845 		 * the 1024ns units of the other interrupt-related timer
    4846 		 * registers as well -- so, really, we ought
   4847 		 *
   4848 		 * XXX implement this division at link speed change!
   4849 		 */
   4850 
   4851 		/*
   4852 		 * For N interrupts/sec, set this value to:
   4853 		 * 1000000000 / (N * 256).  Note that we set the
   4854 		 * absolute and packet timer values to this value
   4855 		 * divided by 4 to get "simple timer" behavior.
   4856 		 */
   4857 
   4858 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4859 	}
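         	/*
         	 * Checking the arithmetic above: for N = 2604 ints/sec,
         	 * 1000000000 / (2604 * 256) =~ 1500, and an ITR value of 1500
         	 * means 1500 * 256ns = 384us between interrupts, i.e. about
         	 * 2604 interrupts/sec.
         	 */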
   4860 
   4861 	error = wm_init_txrx_queues(sc);
   4862 	if (error)
   4863 		goto out;
   4864 
   4865 	/*
   4866 	 * Clear out the VLAN table -- we don't use it (yet).
   4867 	 */
   4868 	CSR_WRITE(sc, WMREG_VET, 0);
   4869 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4870 		trynum = 10; /* Due to hw errata */
   4871 	else
   4872 		trynum = 1;
   4873 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4874 		for (j = 0; j < trynum; j++)
   4875 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4876 
   4877 	/*
   4878 	 * Set up flow-control parameters.
   4879 	 *
   4880 	 * XXX Values could probably stand some tuning.
   4881 	 */
   4882 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4883 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4884 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4885 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4886 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4887 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4888 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4889 	}
   4890 
   4891 	sc->sc_fcrtl = FCRTL_DFLT;
   4892 	if (sc->sc_type < WM_T_82543) {
   4893 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4894 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4895 	} else {
   4896 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4897 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4898 	}
   4899 
   4900 	if (sc->sc_type == WM_T_80003)
   4901 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4902 	else
   4903 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4904 
   4905 	/* Writes the control register. */
   4906 	wm_set_vlan(sc);
   4907 
   4908 	if (sc->sc_flags & WM_F_HAS_MII) {
   4909 		int val;
   4910 
   4911 		switch (sc->sc_type) {
   4912 		case WM_T_80003:
   4913 		case WM_T_ICH8:
   4914 		case WM_T_ICH9:
   4915 		case WM_T_ICH10:
   4916 		case WM_T_PCH:
   4917 		case WM_T_PCH2:
   4918 		case WM_T_PCH_LPT:
   4919 		case WM_T_PCH_SPT:
   4920 			/*
   4921 			 * Set the mac to wait the maximum time between each
   4922 			 * iteration and increase the max iterations when
   4923 			 * polling the phy; this fixes erroneous timeouts at
   4924 			 * 10Mbps.
   4925 			 */
   4926 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4927 			    0xFFFF);
   4928 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4929 			val |= 0x3F;
   4930 			wm_kmrn_writereg(sc,
   4931 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4932 			break;
   4933 		default:
   4934 			break;
   4935 		}
   4936 
   4937 		if (sc->sc_type == WM_T_80003) {
   4938 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4939 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4940 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4941 
    4942 			/* Bypass RX and TX FIFOs */
   4943 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4944 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4945 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4946 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4947 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4948 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4949 		}
   4950 	}
   4951 #if 0
   4952 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4953 #endif
   4954 
   4955 	/* Set up checksum offload parameters. */
   4956 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4957 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4958 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4959 		reg |= RXCSUM_IPOFL;
   4960 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4961 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4962 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4963 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4964 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4965 
   4966 	/* Set up MSI-X */
   4967 	if (sc->sc_nintrs > 1) {
   4968 		uint32_t ivar;
   4969 		struct wm_queue *wmq;
   4970 		int qid, qintr_idx;
   4971 
   4972 		if (sc->sc_type == WM_T_82575) {
   4973 			/* Interrupt control */
   4974 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4975 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4976 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4977 
   4978 			/* TX and RX */
   4979 			for (i = 0; i < sc->sc_nqueues; i++) {
   4980 				wmq = &sc->sc_queue[i];
   4981 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4982 				    EITR_TX_QUEUE(wmq->wmq_id)
   4983 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4984 			}
   4985 			/* Link status */
   4986 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4987 			    EITR_OTHER);
   4988 		} else if (sc->sc_type == WM_T_82574) {
   4989 			/* Interrupt control */
   4990 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4991 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4992 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4993 
   4994 			ivar = 0;
   4995 			/* TX and RX */
   4996 			for (i = 0; i < sc->sc_nqueues; i++) {
   4997 				wmq = &sc->sc_queue[i];
   4998 				qid = wmq->wmq_id;
   4999 				qintr_idx = wmq->wmq_intr_idx;
   5000 
   5001 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5002 				    IVAR_TX_MASK_Q_82574(qid));
   5003 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5004 				    IVAR_RX_MASK_Q_82574(qid));
   5005 			}
   5006 			/* Link status */
   5007 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5008 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
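         			/*
         			 * ivar now holds one (IVAR_VALID_82574 | vector)
         			 * field per Tx and Rx queue plus one for link
         			 * status, all packed into the single IVAR
         			 * register written below.
         			 */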
   5009 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5010 		} else {
   5011 			/* Interrupt control */
   5012 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5013 			    | GPIE_EIAME | GPIE_PBA);
   5014 
   5015 			switch (sc->sc_type) {
   5016 			case WM_T_82580:
   5017 			case WM_T_I350:
   5018 			case WM_T_I354:
   5019 			case WM_T_I210:
   5020 			case WM_T_I211:
   5021 				/* TX and RX */
   5022 				for (i = 0; i < sc->sc_nqueues; i++) {
   5023 					wmq = &sc->sc_queue[i];
   5024 					qid = wmq->wmq_id;
   5025 					qintr_idx = wmq->wmq_intr_idx;
   5026 
   5027 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5028 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5029 					ivar |= __SHIFTIN((qintr_idx
   5030 						| IVAR_VALID),
   5031 					    IVAR_TX_MASK_Q(qid));
   5032 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5033 					ivar |= __SHIFTIN((qintr_idx
   5034 						| IVAR_VALID),
   5035 					    IVAR_RX_MASK_Q(qid));
   5036 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5037 				}
   5038 				break;
   5039 			case WM_T_82576:
   5040 				/* TX and RX */
   5041 				for (i = 0; i < sc->sc_nqueues; i++) {
   5042 					wmq = &sc->sc_queue[i];
   5043 					qid = wmq->wmq_id;
   5044 					qintr_idx = wmq->wmq_intr_idx;
   5045 
   5046 					ivar = CSR_READ(sc,
   5047 					    WMREG_IVAR_Q_82576(qid));
   5048 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5049 					ivar |= __SHIFTIN((qintr_idx
   5050 						| IVAR_VALID),
   5051 					    IVAR_TX_MASK_Q_82576(qid));
   5052 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5053 					ivar |= __SHIFTIN((qintr_idx
   5054 						| IVAR_VALID),
   5055 					    IVAR_RX_MASK_Q_82576(qid));
   5056 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5057 					    ivar);
   5058 				}
   5059 				break;
   5060 			default:
   5061 				break;
   5062 			}
   5063 
   5064 			/* Link status */
   5065 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5066 			    IVAR_MISC_OTHER);
   5067 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5068 		}
   5069 
   5070 		if (sc->sc_nqueues > 1) {
   5071 			wm_init_rss(sc);
   5072 
    5073 			/*
    5074 			 * NOTE: Receive Full-Packet Checksum Offload is
    5075 			 * mutually exclusive with Multiqueue.  This does not
    5076 			 * affect ordinary TCP/IP checksum offload, which
    5077 			 * still works.
    5078 			 */
   5079 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5080 			reg |= RXCSUM_PCSD;
   5081 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5082 		}
   5083 	}
   5084 
   5085 	/* Set up the interrupt registers. */
   5086 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5087 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5088 	    ICR_RXO | ICR_RXT0;
   5089 	if (sc->sc_nintrs > 1) {
   5090 		uint32_t mask;
   5091 		struct wm_queue *wmq;
   5092 
   5093 		switch (sc->sc_type) {
   5094 		case WM_T_82574:
   5095 			CSR_WRITE(sc, WMREG_EIAC_82574,
   5096 			    WMREG_EIAC_82574_MSIX_MASK);
   5097 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   5098 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5099 			break;
   5100 		default:
   5101 			if (sc->sc_type == WM_T_82575) {
   5102 				mask = 0;
   5103 				for (i = 0; i < sc->sc_nqueues; i++) {
   5104 					wmq = &sc->sc_queue[i];
   5105 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5106 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5107 				}
   5108 				mask |= EITR_OTHER;
   5109 			} else {
   5110 				mask = 0;
   5111 				for (i = 0; i < sc->sc_nqueues; i++) {
   5112 					wmq = &sc->sc_queue[i];
   5113 					mask |= 1 << wmq->wmq_intr_idx;
   5114 				}
   5115 				mask |= 1 << sc->sc_link_intr_idx;
   5116 			}
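         			/*
         			 * EIAC selects the extended causes that
         			 * auto-clear, EIAM those that are auto-masked,
         			 * and EIMS enables them; the link interrupt is
         			 * still delivered via the legacy ICR_LSC bit.
         			 */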
   5117 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5118 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5119 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5120 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5121 			break;
   5122 		}
   5123 	} else
   5124 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5125 
   5126 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5127 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5128 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5129 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5130 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5131 		reg |= KABGTXD_BGSQLBIAS;
   5132 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5133 	}
   5134 
   5135 	/* Set up the inter-packet gap. */
   5136 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5137 
   5138 	if (sc->sc_type >= WM_T_82543) {
   5139 		/*
    5140 		 * XXX The 82574 has both ITR and EITR.  Set EITR when we
    5141 		 * use the multiqueue function with MSI-X.
   5142 		 */
   5143 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5144 			int qidx;
   5145 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5146 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5147 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5148 				    sc->sc_itr);
   5149 			}
   5150 			/*
    5151 			 * Link interrupts occur much less often than TX and
    5152 			 * RX interrupts, so we don't tune the
    5153 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5154 			 * FreeBSD's if_igb does.
   5155 			 */
   5156 		} else
   5157 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5158 	}
   5159 
   5160 	/* Set the VLAN ethernetype. */
   5161 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5162 
   5163 	/*
   5164 	 * Set up the transmit control register; we start out with
    5165 	 * a collision distance suitable for FDX, but update it when
   5166 	 * we resolve the media type.
   5167 	 */
   5168 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5169 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5170 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5171 	if (sc->sc_type >= WM_T_82571)
   5172 		sc->sc_tctl |= TCTL_MULR;
   5173 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5174 
   5175 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5176 		/* Write TDT after TCTL.EN is set.  See the datasheet. */
   5177 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5178 	}
   5179 
   5180 	if (sc->sc_type == WM_T_80003) {
   5181 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5182 		reg &= ~TCTL_EXT_GCEX_MASK;
   5183 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5184 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5185 	}
   5186 
   5187 	/* Set the media. */
   5188 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5189 		goto out;
   5190 
   5191 	/* Configure for OS presence */
   5192 	wm_init_manageability(sc);
   5193 
   5194 	/*
   5195 	 * Set up the receive control register; we actually program
   5196 	 * the register when we set the receive filter.  Use multicast
   5197 	 * address offset type 0.
   5198 	 *
   5199 	 * Only the i82544 has the ability to strip the incoming
   5200 	 * CRC, so we don't enable that feature.
   5201 	 */
   5202 	sc->sc_mchash_type = 0;
   5203 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5204 	    | RCTL_MO(sc->sc_mchash_type);
   5205 
   5206 	/*
    5207 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5208 	 */
   5209 	if (sc->sc_type == WM_T_82574)
   5210 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5211 
   5212 	/*
   5213 	 * The I350 has a bug where it always strips the CRC whether
    5214 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   5215 	 */
   5216 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5217 	    || (sc->sc_type == WM_T_I210))
   5218 		sc->sc_rctl |= RCTL_SECRC;
   5219 
   5220 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5221 	    && (ifp->if_mtu > ETHERMTU)) {
   5222 		sc->sc_rctl |= RCTL_LPE;
   5223 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5224 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5225 	}
   5226 
   5227 	if (MCLBYTES == 2048) {
   5228 		sc->sc_rctl |= RCTL_2k;
   5229 	} else {
   5230 		if (sc->sc_type >= WM_T_82543) {
   5231 			switch (MCLBYTES) {
   5232 			case 4096:
   5233 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5234 				break;
   5235 			case 8192:
   5236 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5237 				break;
   5238 			case 16384:
   5239 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5240 				break;
   5241 			default:
   5242 				panic("wm_init: MCLBYTES %d unsupported",
   5243 				    MCLBYTES);
   5244 				break;
   5245 			}
   5246 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5247 	}
   5248 
   5249 	/* Set the receive filter. */
   5250 	wm_set_filter(sc);
   5251 
   5252 	/* Enable ECC */
   5253 	switch (sc->sc_type) {
   5254 	case WM_T_82571:
   5255 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5256 		reg |= PBA_ECC_CORR_EN;
   5257 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5258 		break;
   5259 	case WM_T_PCH_LPT:
   5260 	case WM_T_PCH_SPT:
   5261 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5262 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5263 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5264 
   5265 		sc->sc_ctrl |= CTRL_MEHE;
   5266 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5267 		break;
   5268 	default:
   5269 		break;
   5270 	}
   5271 
    5272 	/* On 82575 and later, set RDT only if RX is enabled */
   5273 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5274 		int qidx;
   5275 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5276 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5277 			for (i = 0; i < WM_NRXDESC; i++) {
   5278 				mutex_enter(rxq->rxq_lock);
   5279 				wm_init_rxdesc(rxq, i);
   5280 				mutex_exit(rxq->rxq_lock);
   5282 			}
   5283 		}
   5284 	}
   5285 
   5286 	wm_turnon(sc);
   5287 
   5288 	/* Start the one second link check clock. */
   5289 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5290 
   5291 	/* ...all done! */
   5292 	ifp->if_flags |= IFF_RUNNING;
   5293 	ifp->if_flags &= ~IFF_OACTIVE;
   5294 
   5295  out:
   5296 	sc->sc_if_flags = ifp->if_flags;
   5297 	if (error)
   5298 		log(LOG_ERR, "%s: interface not running\n",
   5299 		    device_xname(sc->sc_dev));
   5300 	return error;
   5301 }
   5302 
   5303 /*
   5304  * wm_stop:		[ifnet interface function]
   5305  *
   5306  *	Stop transmission on the interface.
   5307  */
   5308 static void
   5309 wm_stop(struct ifnet *ifp, int disable)
   5310 {
   5311 	struct wm_softc *sc = ifp->if_softc;
   5312 
   5313 	WM_CORE_LOCK(sc);
   5314 	wm_stop_locked(ifp, disable);
   5315 	WM_CORE_UNLOCK(sc);
   5316 }
   5317 
   5318 static void
   5319 wm_stop_locked(struct ifnet *ifp, int disable)
   5320 {
   5321 	struct wm_softc *sc = ifp->if_softc;
   5322 	struct wm_txsoft *txs;
   5323 	int i, qidx;
   5324 
   5325 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5326 		device_xname(sc->sc_dev), __func__));
   5327 	KASSERT(WM_CORE_LOCKED(sc));
   5328 
   5329 	wm_turnoff(sc);
   5330 
   5331 	/* Stop the one second clock. */
   5332 	callout_stop(&sc->sc_tick_ch);
   5333 
   5334 	/* Stop the 82547 Tx FIFO stall check timer. */
   5335 	if (sc->sc_type == WM_T_82547)
   5336 		callout_stop(&sc->sc_txfifo_ch);
   5337 
   5338 	if (sc->sc_flags & WM_F_HAS_MII) {
   5339 		/* Down the MII. */
   5340 		mii_down(&sc->sc_mii);
   5341 	} else {
   5342 #if 0
   5343 		/* Should we clear PHY's status properly? */
   5344 		wm_reset(sc);
   5345 #endif
   5346 	}
   5347 
   5348 	/* Stop the transmit and receive processes. */
   5349 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5350 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5351 	sc->sc_rctl &= ~RCTL_EN;
   5352 
   5353 	/*
   5354 	 * Clear the interrupt mask to ensure the device cannot assert its
   5355 	 * interrupt line.
   5356 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5357 	 * service any currently pending or shared interrupt.
   5358 	 */
   5359 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5360 	sc->sc_icr = 0;
   5361 	if (sc->sc_nintrs > 1) {
   5362 		if (sc->sc_type != WM_T_82574) {
   5363 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5364 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5365 		} else
   5366 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5367 	}
   5368 
   5369 	/* Release any queued transmit buffers. */
   5370 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5371 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5372 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5373 		mutex_enter(txq->txq_lock);
   5374 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5375 			txs = &txq->txq_soft[i];
   5376 			if (txs->txs_mbuf != NULL) {
   5377 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5378 				m_freem(txs->txs_mbuf);
   5379 				txs->txs_mbuf = NULL;
   5380 			}
   5381 		}
   5382 		mutex_exit(txq->txq_lock);
   5383 	}
   5384 
   5385 	/* Mark the interface as down and cancel the watchdog timer. */
   5386 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5387 	ifp->if_timer = 0;
   5388 
   5389 	if (disable) {
   5390 		for (i = 0; i < sc->sc_nqueues; i++) {
   5391 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5392 			mutex_enter(rxq->rxq_lock);
   5393 			wm_rxdrain(rxq);
   5394 			mutex_exit(rxq->rxq_lock);
   5395 		}
   5396 	}
   5397 
   5398 #if 0 /* notyet */
   5399 	if (sc->sc_type >= WM_T_82544)
   5400 		CSR_WRITE(sc, WMREG_WUC, 0);
   5401 #endif
   5402 }
   5403 
   5404 static void
   5405 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5406 {
   5407 	struct mbuf *m;
   5408 	int i;
   5409 
   5410 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5411 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5412 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5413 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5414 		    m->m_data, m->m_len, m->m_flags);
   5415 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5416 	    i, i == 1 ? "" : "s");
   5417 }
   5418 
   5419 /*
   5420  * wm_82547_txfifo_stall:
   5421  *
   5422  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5423  *	reset the FIFO pointers, and restart packet transmission.
   5424  */
   5425 static void
   5426 wm_82547_txfifo_stall(void *arg)
   5427 {
   5428 	struct wm_softc *sc = arg;
   5429 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5430 
   5431 	mutex_enter(txq->txq_lock);
   5432 
   5433 	if (txq->txq_stopping)
   5434 		goto out;
   5435 
   5436 	if (txq->txq_fifo_stall) {
   5437 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5438 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5439 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5440 			/*
   5441 			 * Packets have drained.  Stop transmitter, reset
   5442 			 * FIFO pointers, restart transmitter, and kick
   5443 			 * the packet queue.
   5444 			 */
   5445 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5446 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5447 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5448 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5449 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5450 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5451 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5452 			CSR_WRITE_FLUSH(sc);
   5453 
   5454 			txq->txq_fifo_head = 0;
   5455 			txq->txq_fifo_stall = 0;
   5456 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5457 		} else {
   5458 			/*
   5459 			 * Still waiting for packets to drain; try again in
   5460 			 * another tick.
   5461 			 */
   5462 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5463 		}
   5464 	}
   5465 
   5466 out:
   5467 	mutex_exit(txq->txq_lock);
   5468 }
   5469 
   5470 /*
   5471  * wm_82547_txfifo_bugchk:
   5472  *
   5473  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5474  *	prevent enqueueing a packet that would wrap around the end
    5475  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5476  *
   5477  *	We do this by checking the amount of space before the end
   5478  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5479  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5480  *	the internal FIFO pointers to the beginning, and restart
   5481  *	transmission on the interface.
   5482  */
   5483 #define	WM_FIFO_HDR		0x10
   5484 #define	WM_82547_PAD_LEN	0x3e0
   5485 static int
   5486 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5487 {
   5488 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5489 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5490 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5491 
   5492 	/* Just return if already stalled. */
   5493 	if (txq->txq_fifo_stall)
   5494 		return 1;
   5495 
   5496 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5497 		/* Stall only occurs in half-duplex mode. */
   5498 		goto send_packet;
   5499 	}
   5500 
   5501 	if (len >= WM_82547_PAD_LEN + space) {
   5502 		txq->txq_fifo_stall = 1;
   5503 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5504 		return 1;
   5505 	}
   5506 
   5507  send_packet:
   5508 	txq->txq_fifo_head += len;
   5509 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5510 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5511 
   5512 	return 0;
   5513 }
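         
         /*
          * Example of the check above (illustrative numbers): a 1514-byte
          * frame occupies roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536
          * FIFO bytes, and is queued only while 1536 < WM_82547_PAD_LEN +
          * the space remaining before the FIFO wraps.
          */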
   5514 
   5515 static int
   5516 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5517 {
   5518 	int error;
   5519 
   5520 	/*
   5521 	 * Allocate the control data structures, and create and load the
   5522 	 * DMA map for it.
   5523 	 *
   5524 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5525 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5526 	 * both sets within the same 4G segment.
   5527 	 */
   5528 	if (sc->sc_type < WM_T_82544)
   5529 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5530 	else
   5531 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5532 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5533 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5534 	else
   5535 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5536 
   5537 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5538 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5539 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5540 		aprint_error_dev(sc->sc_dev,
   5541 		    "unable to allocate TX control data, error = %d\n",
   5542 		    error);
   5543 		goto fail_0;
   5544 	}
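         	/*
         	 * The 0x100000000ULL boundary argument above keeps the
         	 * allocation from crossing a 4G boundary, implementing the
         	 * NOTE at the top of this function.
         	 */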
   5545 
   5546 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5547 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5548 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5549 		aprint_error_dev(sc->sc_dev,
   5550 		    "unable to map TX control data, error = %d\n", error);
   5551 		goto fail_1;
   5552 	}
   5553 
   5554 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5555 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5556 		aprint_error_dev(sc->sc_dev,
   5557 		    "unable to create TX control data DMA map, error = %d\n",
   5558 		    error);
   5559 		goto fail_2;
   5560 	}
   5561 
   5562 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5563 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5564 		aprint_error_dev(sc->sc_dev,
   5565 		    "unable to load TX control data DMA map, error = %d\n",
   5566 		    error);
   5567 		goto fail_3;
   5568 	}
   5569 
   5570 	return 0;
   5571 
   5572  fail_3:
   5573 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5574  fail_2:
   5575 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5576 	    WM_TXDESCS_SIZE(txq));
   5577  fail_1:
   5578 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5579  fail_0:
   5580 	return error;
   5581 }
   5582 
   5583 static void
   5584 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5585 {
   5586 
   5587 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5588 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5589 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5590 	    WM_TXDESCS_SIZE(txq));
   5591 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5592 }
   5593 
   5594 static int
   5595 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5596 {
   5597 	int error;
   5598 	size_t rxq_descs_size;
   5599 
   5600 	/*
   5601 	 * Allocate the control data structures, and create and load the
   5602 	 * DMA map for it.
   5603 	 *
   5604 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5605 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5606 	 * both sets within the same 4G segment.
   5607 	 */
   5608 	rxq->rxq_ndesc = WM_NRXDESC;
   5609 	if (sc->sc_type == WM_T_82574)
   5610 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5611 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5612 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5613 	else
   5614 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5615 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5616 
   5617 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5618 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5619 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5620 		aprint_error_dev(sc->sc_dev,
   5621 		    "unable to allocate RX control data, error = %d\n",
   5622 		    error);
   5623 		goto fail_0;
   5624 	}
   5625 
   5626 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5627 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5628 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5629 		aprint_error_dev(sc->sc_dev,
   5630 		    "unable to map RX control data, error = %d\n", error);
   5631 		goto fail_1;
   5632 	}
   5633 
   5634 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5635 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5636 		aprint_error_dev(sc->sc_dev,
   5637 		    "unable to create RX control data DMA map, error = %d\n",
   5638 		    error);
   5639 		goto fail_2;
   5640 	}
   5641 
   5642 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5643 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5644 		aprint_error_dev(sc->sc_dev,
   5645 		    "unable to load RX control data DMA map, error = %d\n",
   5646 		    error);
   5647 		goto fail_3;
   5648 	}
   5649 
   5650 	return 0;
   5651 
   5652  fail_3:
   5653 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5654  fail_2:
   5655 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5656 	    rxq_descs_size);
   5657  fail_1:
   5658 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5659  fail_0:
   5660 	return error;
   5661 }
   5662 
   5663 static void
   5664 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5665 {
   5666 
   5667 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5668 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5669 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5670 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5671 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5672 }
   5673 
   5674 
    5675 
   5677 {
   5678 	int i, error;
   5679 
   5680 	/* Create the transmit buffer DMA maps. */
   5681 	WM_TXQUEUELEN(txq) =
   5682 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5683 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5684 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5685 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5686 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5687 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5688 			aprint_error_dev(sc->sc_dev,
   5689 			    "unable to create Tx DMA map %d, error = %d\n",
   5690 			    i, error);
   5691 			goto fail;
   5692 		}
   5693 	}
   5694 
   5695 	return 0;
   5696 
   5697  fail:
   5698 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5699 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5700 			bus_dmamap_destroy(sc->sc_dmat,
   5701 			    txq->txq_soft[i].txs_dmamap);
   5702 	}
   5703 	return error;
   5704 }
   5705 
   5706 static void
   5707 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5708 {
   5709 	int i;
   5710 
   5711 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5712 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5713 			bus_dmamap_destroy(sc->sc_dmat,
   5714 			    txq->txq_soft[i].txs_dmamap);
   5715 	}
   5716 }
   5717 
   5718 static int
   5719 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5720 {
   5721 	int i, error;
   5722 
   5723 	/* Create the receive buffer DMA maps. */
   5724 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5725 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5726 			    MCLBYTES, 0, 0,
   5727 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5728 			aprint_error_dev(sc->sc_dev,
   5729 			    "unable to create Rx DMA map %d error = %d\n",
   5730 			    i, error);
   5731 			goto fail;
   5732 		}
   5733 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5734 	}
   5735 
   5736 	return 0;
   5737 
   5738  fail:
   5739 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5740 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5741 			bus_dmamap_destroy(sc->sc_dmat,
   5742 			    rxq->rxq_soft[i].rxs_dmamap);
   5743 	}
   5744 	return error;
   5745 }
   5746 
   5747 static void
   5748 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5749 {
   5750 	int i;
   5751 
   5752 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5753 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5754 			bus_dmamap_destroy(sc->sc_dmat,
   5755 			    rxq->rxq_soft[i].rxs_dmamap);
   5756 	}
   5757 }
   5758 
   5759 /*
    5760  * wm_alloc_txrx_queues:
   5761  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5762  */
   5763 static int
   5764 wm_alloc_txrx_queues(struct wm_softc *sc)
   5765 {
   5766 	int i, error, tx_done, rx_done;
   5767 
   5768 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5769 	    KM_SLEEP);
   5770 	if (sc->sc_queue == NULL) {
    5771 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5772 		error = ENOMEM;
   5773 		goto fail_0;
   5774 	}
   5775 
   5776 	/*
   5777 	 * For transmission
   5778 	 */
   5779 	error = 0;
   5780 	tx_done = 0;
   5781 	for (i = 0; i < sc->sc_nqueues; i++) {
   5782 #ifdef WM_EVENT_COUNTERS
   5783 		int j;
   5784 		const char *xname;
   5785 #endif
   5786 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5787 		txq->txq_sc = sc;
   5788 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5789 
   5790 		error = wm_alloc_tx_descs(sc, txq);
   5791 		if (error)
   5792 			break;
   5793 		error = wm_alloc_tx_buffer(sc, txq);
   5794 		if (error) {
   5795 			wm_free_tx_descs(sc, txq);
   5796 			break;
   5797 		}
   5798 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5799 		if (txq->txq_interq == NULL) {
   5800 			wm_free_tx_descs(sc, txq);
   5801 			wm_free_tx_buffer(sc, txq);
   5802 			error = ENOMEM;
   5803 			break;
   5804 		}
   5805 
   5806 #ifdef WM_EVENT_COUNTERS
   5807 		xname = device_xname(sc->sc_dev);
   5808 
   5809 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5810 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5811 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5812 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5813 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5814 
   5815 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5816 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5817 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5818 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5819 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5820 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5821 
   5822 		for (j = 0; j < WM_NTXSEGS; j++) {
   5823 			snprintf(txq->txq_txseg_evcnt_names[j],
    5824 			    sizeof(txq->txq_txseg_evcnt_names[j]),
         			    "txq%02dtxseg%d", i, j);
    5825 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
         			    EVCNT_TYPE_MISC,
   5826 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5827 		}
   5828 
   5829 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5830 
   5831 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5832 #endif /* WM_EVENT_COUNTERS */
   5833 
   5834 		tx_done++;
   5835 	}
   5836 	if (error)
   5837 		goto fail_1;
   5838 
   5839 	/*
    5840 	 * For receive
   5841 	 */
   5842 	error = 0;
   5843 	rx_done = 0;
   5844 	for (i = 0; i < sc->sc_nqueues; i++) {
   5845 #ifdef WM_EVENT_COUNTERS
   5846 		const char *xname;
   5847 #endif
   5848 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5849 		rxq->rxq_sc = sc;
   5850 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5851 
   5852 		error = wm_alloc_rx_descs(sc, rxq);
   5853 		if (error)
   5854 			break;
   5855 
   5856 		error = wm_alloc_rx_buffer(sc, rxq);
   5857 		if (error) {
   5858 			wm_free_rx_descs(sc, rxq);
   5859 			break;
   5860 		}
   5861 
   5862 #ifdef WM_EVENT_COUNTERS
   5863 		xname = device_xname(sc->sc_dev);
   5864 
   5865 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5866 
   5867 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5868 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5869 #endif /* WM_EVENT_COUNTERS */
   5870 
   5871 		rx_done++;
   5872 	}
   5873 	if (error)
   5874 		goto fail_2;
   5875 
   5876 	return 0;
   5877 
   5878  fail_2:
   5879 	for (i = 0; i < rx_done; i++) {
   5880 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5881 		wm_free_rx_buffer(sc, rxq);
   5882 		wm_free_rx_descs(sc, rxq);
   5883 		if (rxq->rxq_lock)
   5884 			mutex_obj_free(rxq->rxq_lock);
   5885 	}
   5886  fail_1:
   5887 	for (i = 0; i < tx_done; i++) {
   5888 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5889 		pcq_destroy(txq->txq_interq);
   5890 		wm_free_tx_buffer(sc, txq);
   5891 		wm_free_tx_descs(sc, txq);
   5892 		if (txq->txq_lock)
   5893 			mutex_obj_free(txq->txq_lock);
   5894 	}
   5895 
   5896 	kmem_free(sc->sc_queue,
   5897 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5898  fail_0:
   5899 	return error;
   5900 }
   5901 
   5902 /*
    5903  * wm_free_txrx_queues:
   5904  *	Free {tx,rx}descs and {tx,rx} buffers
   5905  */
   5906 static void
   5907 wm_free_txrx_queues(struct wm_softc *sc)
   5908 {
   5909 	int i;
   5910 
   5911 	for (i = 0; i < sc->sc_nqueues; i++) {
   5912 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5913 		wm_free_rx_buffer(sc, rxq);
   5914 		wm_free_rx_descs(sc, rxq);
   5915 		if (rxq->rxq_lock)
   5916 			mutex_obj_free(rxq->rxq_lock);
   5917 	}
   5918 
   5919 	for (i = 0; i < sc->sc_nqueues; i++) {
   5920 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5921 		wm_free_tx_buffer(sc, txq);
   5922 		wm_free_tx_descs(sc, txq);
   5923 		if (txq->txq_lock)
   5924 			mutex_obj_free(txq->txq_lock);
   5925 	}
   5926 
   5927 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5928 }
   5929 
   5930 static void
   5931 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5932 {
   5933 
   5934 	KASSERT(mutex_owned(txq->txq_lock));
   5935 
   5936 	/* Initialize the transmit descriptor ring. */
   5937 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5938 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5939 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5940 	txq->txq_free = WM_NTXDESC(txq);
   5941 	txq->txq_next = 0;
   5942 }
   5943 
   5944 static void
   5945 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5946     struct wm_txqueue *txq)
   5947 {
   5948 
   5949 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5950 		device_xname(sc->sc_dev), __func__));
   5951 	KASSERT(mutex_owned(txq->txq_lock));
   5952 
   5953 	if (sc->sc_type < WM_T_82543) {
   5954 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5955 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5956 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5957 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5958 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5959 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5960 	} else {
   5961 		int qid = wmq->wmq_id;
   5962 
   5963 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5964 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5965 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5966 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5967 
   5968 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5969 			/*
   5970 			 * Don't write TDT before TCTL.EN is set.
   5971 			 * See the document.
    5972 			 * See the datasheet.
   5973 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5974 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5975 			    | TXDCTL_WTHRESH(0));
   5976 		else {
   5977 			/* ITR / 4 */
   5978 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5979 			if (sc->sc_type >= WM_T_82540) {
    5980 				/* Should be the same as TIDV */
   5981 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5982 			}
   5983 
   5984 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5985 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5986 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5987 		}
   5988 	}
   5989 }
   5990 
   5991 static void
   5992 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5993 {
   5994 	int i;
   5995 
   5996 	KASSERT(mutex_owned(txq->txq_lock));
   5997 
   5998 	/* Initialize the transmit job descriptors. */
   5999 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6000 		txq->txq_soft[i].txs_mbuf = NULL;
   6001 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6002 	txq->txq_snext = 0;
   6003 	txq->txq_sdirty = 0;
   6004 }
   6005 
   6006 static void
   6007 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6008     struct wm_txqueue *txq)
   6009 {
   6010 
   6011 	KASSERT(mutex_owned(txq->txq_lock));
   6012 
   6013 	/*
   6014 	 * Set up some register offsets that are different between
   6015 	 * the i82542 and the i82543 and later chips.
   6016 	 */
   6017 	if (sc->sc_type < WM_T_82543)
   6018 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6019 	else
   6020 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6021 
   6022 	wm_init_tx_descs(sc, txq);
   6023 	wm_init_tx_regs(sc, wmq, txq);
   6024 	wm_init_tx_buffer(sc, txq);
   6025 }
   6026 
   6027 static void
   6028 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6029     struct wm_rxqueue *rxq)
   6030 {
   6031 
   6032 	KASSERT(mutex_owned(rxq->rxq_lock));
   6033 
   6034 	/*
   6035 	 * Initialize the receive descriptor and receive job
   6036 	 * descriptor rings.
   6037 	 */
   6038 	if (sc->sc_type < WM_T_82543) {
   6039 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6040 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6041 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6042 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6043 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6044 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6045 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6046 
   6047 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6048 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6049 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6050 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6051 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6052 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6053 	} else {
   6054 		int qid = wmq->wmq_id;
   6055 
   6056 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6057 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
    6058 		CSR_WRITE(sc, WMREG_RDLEN(qid),
         		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6059 
   6060 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6061 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    6062 				panic("%s: MCLBYTES %d unsupported for "
         				    "i82575 or higher\n", __func__, MCLBYTES);
    6063 
    6064 			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported for now. */
    6065 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
         			    SRRCTL_DESCTYPE_ADV_ONEBUF
         			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
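         			/*
         			 * With MCLBYTES == 2048 and SRRCTL_BSIZEPKT in
         			 * 1 KB units (a shift of 10, as we read the
         			 * datasheet), the size field above is set to 2.
         			 */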
   6067 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6068 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6069 			    | RXDCTL_WTHRESH(1));
   6070 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6071 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6072 		} else {
   6073 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6074 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6075 			/* ITR / 4 */
   6076 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    6077 			/* MUST be the same as RDTR */
   6078 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   6079 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6080 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6081 		}
   6082 	}
   6083 }
   6084 
   6085 static int
   6086 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6087 {
   6088 	struct wm_rxsoft *rxs;
   6089 	int error, i;
   6090 
   6091 	KASSERT(mutex_owned(rxq->rxq_lock));
   6092 
   6093 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6094 		rxs = &rxq->rxq_soft[i];
   6095 		if (rxs->rxs_mbuf == NULL) {
   6096 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6097 				log(LOG_ERR, "%s: unable to allocate or map "
   6098 				    "rx buffer %d, error = %d\n",
   6099 				    device_xname(sc->sc_dev), i, error);
   6100 				/*
   6101 				 * XXX Should attempt to run with fewer receive
   6102 				 * XXX buffers instead of just failing.
   6103 				 */
   6104 				wm_rxdrain(rxq);
   6105 				return ENOMEM;
   6106 			}
   6107 		} else {
   6108 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6109 				wm_init_rxdesc(rxq, i);
   6110 			/*
    6111 			 * For 82575 and newer devices, the RX descriptors
    6112 			 * must be initialized after RCTL.EN is set in
    6113 			 * wm_set_filter().
   6114 			 */
   6115 		}
   6116 	}
   6117 	rxq->rxq_ptr = 0;
   6118 	rxq->rxq_discard = 0;
   6119 	WM_RXCHAIN_RESET(rxq);
   6120 
   6121 	return 0;
   6122 }
   6123 
   6124 static int
   6125 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6126     struct wm_rxqueue *rxq)
   6127 {
   6128 
   6129 	KASSERT(mutex_owned(rxq->rxq_lock));
   6130 
   6131 	/*
   6132 	 * Set up some register offsets that are different between
   6133 	 * the i82542 and the i82543 and later chips.
   6134 	 */
   6135 	if (sc->sc_type < WM_T_82543)
   6136 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6137 	else
   6138 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6139 
   6140 	wm_init_rx_regs(sc, wmq, rxq);
   6141 	return wm_init_rx_buffer(sc, rxq);
   6142 }
   6143 
   6144 /*
    6145  * wm_init_txrx_queues:
   6146  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6147  */
   6148 static int
   6149 wm_init_txrx_queues(struct wm_softc *sc)
   6150 {
   6151 	int i, error = 0;
   6152 
   6153 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6154 		device_xname(sc->sc_dev), __func__));
   6155 
   6156 	for (i = 0; i < sc->sc_nqueues; i++) {
   6157 		struct wm_queue *wmq = &sc->sc_queue[i];
   6158 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6159 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6160 
   6161 		mutex_enter(txq->txq_lock);
   6162 		wm_init_tx_queue(sc, wmq, txq);
   6163 		mutex_exit(txq->txq_lock);
   6164 
   6165 		mutex_enter(rxq->rxq_lock);
   6166 		error = wm_init_rx_queue(sc, wmq, rxq);
   6167 		mutex_exit(rxq->rxq_lock);
   6168 		if (error)
   6169 			break;
   6170 	}
   6171 
   6172 	return error;
   6173 }
   6174 
   6175 /*
   6176  * wm_tx_offload:
   6177  *
   6178  *	Set up TCP/IP checksumming parameters for the
   6179  *	specified packet.
   6180  */
   6181 static int
   6182 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6183     uint8_t *fieldsp)
   6184 {
   6185 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6186 	struct mbuf *m0 = txs->txs_mbuf;
   6187 	struct livengood_tcpip_ctxdesc *t;
   6188 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6189 	uint32_t ipcse;
   6190 	struct ether_header *eh;
   6191 	int offset, iphl;
   6192 	uint8_t fields;
   6193 
   6194 	/*
   6195 	 * XXX It would be nice if the mbuf pkthdr had offset
   6196 	 * fields for the protocol headers.
   6197 	 */
   6198 
   6199 	eh = mtod(m0, struct ether_header *);
   6200 	switch (htons(eh->ether_type)) {
   6201 	case ETHERTYPE_IP:
   6202 	case ETHERTYPE_IPV6:
   6203 		offset = ETHER_HDR_LEN;
   6204 		break;
   6205 
   6206 	case ETHERTYPE_VLAN:
   6207 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6208 		break;
   6209 
   6210 	default:
   6211 		/*
   6212 		 * Don't support this protocol or encapsulation.
   6213 		 */
   6214 		*fieldsp = 0;
   6215 		*cmdp = 0;
   6216 		return 0;
   6217 	}
   6218 
   6219 	if ((m0->m_pkthdr.csum_flags &
   6220 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6221 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6222 	} else {
   6223 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6224 	}
   6225 	ipcse = offset + iphl - 1;
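         	/*
         	 * e.g. for an untagged IPv4 packet: offset = 14, iphl = 20,
         	 * so ipcse = 33, the offset of the last byte of the IP header
         	 * (the IPCSE field is inclusive).
         	 */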
   6226 
   6227 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6228 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6229 	seg = 0;
   6230 	fields = 0;
   6231 
   6232 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6233 		int hlen = offset + iphl;
   6234 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6235 
   6236 		if (__predict_false(m0->m_len <
   6237 				    (hlen + sizeof(struct tcphdr)))) {
   6238 			/*
   6239 			 * TCP/IP headers are not in the first mbuf; we need
   6240 			 * to do this the slow and painful way.  Let's just
   6241 			 * hope this doesn't happen very often.
   6242 			 */
   6243 			struct tcphdr th;
   6244 
   6245 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6246 
   6247 			m_copydata(m0, hlen, sizeof(th), &th);
   6248 			if (v4) {
   6249 				struct ip ip;
   6250 
   6251 				m_copydata(m0, offset, sizeof(ip), &ip);
   6252 				ip.ip_len = 0;
   6253 				m_copyback(m0,
   6254 				    offset + offsetof(struct ip, ip_len),
   6255 				    sizeof(ip.ip_len), &ip.ip_len);
   6256 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6257 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6258 			} else {
   6259 				struct ip6_hdr ip6;
   6260 
   6261 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6262 				ip6.ip6_plen = 0;
   6263 				m_copyback(m0,
   6264 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6265 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6266 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6267 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6268 			}
   6269 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6270 			    sizeof(th.th_sum), &th.th_sum);
   6271 
   6272 			hlen += th.th_off << 2;
   6273 		} else {
   6274 			/*
   6275 			 * TCP/IP headers are in the first mbuf; we can do
   6276 			 * this the easy way.
   6277 			 */
   6278 			struct tcphdr *th;
   6279 
   6280 			if (v4) {
   6281 				struct ip *ip =
   6282 				    (void *)(mtod(m0, char *) + offset);
   6283 				th = (void *)(mtod(m0, char *) + hlen);
   6284 
   6285 				ip->ip_len = 0;
   6286 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6287 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6288 			} else {
   6289 				struct ip6_hdr *ip6 =
   6290 				    (void *)(mtod(m0, char *) + offset);
   6291 				th = (void *)(mtod(m0, char *) + hlen);
   6292 
   6293 				ip6->ip6_plen = 0;
   6294 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6295 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6296 			}
   6297 			hlen += th->th_off << 2;
   6298 		}
   6299 
   6300 		if (v4) {
   6301 			WM_Q_EVCNT_INCR(txq, txtso);
   6302 			cmdlen |= WTX_TCPIP_CMD_IP;
   6303 		} else {
   6304 			WM_Q_EVCNT_INCR(txq, txtso6);
   6305 			ipcse = 0;
   6306 		}
   6307 		cmd |= WTX_TCPIP_CMD_TSE;
   6308 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6309 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6310 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6311 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6312 	}
   6313 
   6314 	/*
   6315 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6316 	 * offload feature, if we load the context descriptor, we
   6317 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6318 	 */
   6319 
   6320 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6321 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6322 	    WTX_TCPIP_IPCSE(ipcse);
   6323 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6324 		WM_Q_EVCNT_INCR(txq, txipsum);
   6325 		fields |= WTX_IXSM;
   6326 	}
   6327 
   6328 	offset += iphl;
   6329 
   6330 	if (m0->m_pkthdr.csum_flags &
   6331 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6332 		WM_Q_EVCNT_INCR(txq, txtusum);
   6333 		fields |= WTX_TXSM;
   6334 		tucs = WTX_TCPIP_TUCSS(offset) |
   6335 		    WTX_TCPIP_TUCSO(offset +
   6336 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6337 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6338 	} else if ((m0->m_pkthdr.csum_flags &
   6339 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6340 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6341 		fields |= WTX_TXSM;
   6342 		tucs = WTX_TCPIP_TUCSS(offset) |
   6343 		    WTX_TCPIP_TUCSO(offset +
   6344 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6345 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6346 	} else {
   6347 		/* Just initialize it to a valid TCP context. */
   6348 		tucs = WTX_TCPIP_TUCSS(offset) |
   6349 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6350 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6351 	}
   6352 
   6353 	/* Fill in the context descriptor. */
   6354 	t = (struct livengood_tcpip_ctxdesc *)
   6355 	    &txq->txq_descs[txq->txq_next];
   6356 	t->tcpip_ipcs = htole32(ipcs);
   6357 	t->tcpip_tucs = htole32(tucs);
   6358 	t->tcpip_cmdlen = htole32(cmdlen);
   6359 	t->tcpip_seg = htole32(seg);
   6360 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6361 
   6362 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6363 	txs->txs_ndesc++;
   6364 
   6365 	*cmdp = cmd;
   6366 	*fieldsp = fields;
   6367 
   6368 	return 0;
   6369 }
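
/*
 * Worked example (illustrative, untagged IPv4/TCP with a 20-byte IP
 * header): wm_tx_offload() computes offset = 14 (Ethernet header), so
 * IPCSS = 14, IPCSO = 14 + 10 = 24 (ip_sum), IPCSE = 33 (last IP header
 * byte), TUCSS = 34, TUCSO = 34 + 16 = 50 (th_sum) and TUCSE = 0
 * ("checksum to the end of the packet").
 */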
   6370 
   6371 static inline int
   6372 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6373 {
   6374 	struct wm_softc *sc = ifp->if_softc;
   6375 	u_int cpuid = cpu_index(curcpu());
   6376 
    6377 	/*
    6378 	 * Currently a simple CPU-based distribution strategy; see the
    6379 	 * illustrative sketch below.
    6380 	 * TODO: distribute by flowid (RSS hash value).
    6381 	 */
   6382 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6383 }
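
/*
 * Illustrative sketch (not compiled): a possible flowid-based variant of
 * wm_select_txqueue() for the TODO above.  m_get_flowid() is a
 * hypothetical accessor for an RSS hash recorded at receive time, not an
 * existing API.
 */
#if 0
static inline int
wm_select_txqueue_flowid(struct ifnet *ifp, struct mbuf *m)
{
	struct wm_softc *sc = ifp->if_softc;
	uint32_t hash;

	/* Keep all packets of one flow on one queue to preserve order. */
	if (m_get_flowid(m, &hash))		/* hypothetical */
		return hash % sc->sc_nqueues;

	/* Fall back to the CPU-based distribution above. */
	return (cpu_index(curcpu()) + sc->sc_affinity_offset)
	    % sc->sc_nqueues;
}
#endif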
   6384 
   6385 /*
   6386  * wm_start:		[ifnet interface function]
   6387  *
   6388  *	Start packet transmission on the interface.
   6389  */
   6390 static void
   6391 wm_start(struct ifnet *ifp)
   6392 {
   6393 	struct wm_softc *sc = ifp->if_softc;
   6394 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6395 
   6396 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6397 
   6398 	/*
   6399 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6400 	 */
   6401 
   6402 	mutex_enter(txq->txq_lock);
   6403 	if (!txq->txq_stopping)
   6404 		wm_start_locked(ifp);
   6405 	mutex_exit(txq->txq_lock);
   6406 }
   6407 
   6408 static void
   6409 wm_start_locked(struct ifnet *ifp)
   6410 {
   6411 	struct wm_softc *sc = ifp->if_softc;
   6412 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6413 
   6414 	wm_send_common_locked(ifp, txq, false);
   6415 }
   6416 
   6417 static int
   6418 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6419 {
   6420 	int qid;
   6421 	struct wm_softc *sc = ifp->if_softc;
   6422 	struct wm_txqueue *txq;
   6423 
   6424 	qid = wm_select_txqueue(ifp, m);
   6425 	txq = &sc->sc_queue[qid].wmq_txq;
   6426 
   6427 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6428 		m_freem(m);
   6429 		WM_Q_EVCNT_INCR(txq, txdrop);
   6430 		return ENOBUFS;
   6431 	}
   6432 
   6433 	/*
   6434 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6435 	 */
   6436 	ifp->if_obytes += m->m_pkthdr.len;
   6437 	if (m->m_flags & M_MCAST)
   6438 		ifp->if_omcasts++;
   6439 
   6440 	if (mutex_tryenter(txq->txq_lock)) {
   6441 		if (!txq->txq_stopping)
   6442 			wm_transmit_locked(ifp, txq);
   6443 		mutex_exit(txq->txq_lock);
   6444 	}
   6445 
   6446 	return 0;
   6447 }
   6448 
   6449 static void
   6450 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6451 {
   6452 
   6453 	wm_send_common_locked(ifp, txq, true);
   6454 }
   6455 
   6456 static void
   6457 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6458     bool is_transmit)
   6459 {
   6460 	struct wm_softc *sc = ifp->if_softc;
   6461 	struct mbuf *m0;
   6462 	struct m_tag *mtag;
   6463 	struct wm_txsoft *txs;
   6464 	bus_dmamap_t dmamap;
   6465 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6466 	bus_addr_t curaddr;
   6467 	bus_size_t seglen, curlen;
   6468 	uint32_t cksumcmd;
   6469 	uint8_t cksumfields;
   6470 
   6471 	KASSERT(mutex_owned(txq->txq_lock));
   6472 
   6473 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6474 		return;
   6475 
   6476 	/* Remember the previous number of free descriptors. */
   6477 	ofree = txq->txq_free;
   6478 
   6479 	/*
   6480 	 * Loop through the send queue, setting up transmit descriptors
   6481 	 * until we drain the queue, or use up all available transmit
   6482 	 * descriptors.
   6483 	 */
   6484 	for (;;) {
   6485 		m0 = NULL;
   6486 
   6487 		/* Get a work queue entry. */
   6488 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6489 			wm_txeof(sc, txq);
   6490 			if (txq->txq_sfree == 0) {
   6491 				DPRINTF(WM_DEBUG_TX,
   6492 				    ("%s: TX: no free job descriptors\n",
   6493 					device_xname(sc->sc_dev)));
   6494 				WM_Q_EVCNT_INCR(txq, txsstall);
   6495 				break;
   6496 			}
   6497 		}
   6498 
   6499 		/* Grab a packet off the queue. */
   6500 		if (is_transmit)
   6501 			m0 = pcq_get(txq->txq_interq);
   6502 		else
   6503 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6504 		if (m0 == NULL)
   6505 			break;
   6506 
   6507 		DPRINTF(WM_DEBUG_TX,
   6508 		    ("%s: TX: have packet to transmit: %p\n",
   6509 		    device_xname(sc->sc_dev), m0));
   6510 
   6511 		txs = &txq->txq_soft[txq->txq_snext];
   6512 		dmamap = txs->txs_dmamap;
   6513 
   6514 		use_tso = (m0->m_pkthdr.csum_flags &
   6515 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6516 
   6517 		/*
   6518 		 * So says the Linux driver:
   6519 		 * The controller does a simple calculation to make sure
   6520 		 * there is enough room in the FIFO before initiating the
    6521 		 * DMA for each buffer; it assumes that
    6522 		 *	ceil(buffer len / MSS) <= 4.
    6523 		 * To make sure we don't overrun the FIFO, cap each DMA
    6524 		 * segment at 4 * MSS when the MSS drops.
   6525 		 */
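		/*
		 * Worked example: with an MSS of 1448 the cap below is
		 * 1448 << 2 = 5792 bytes, so bus_dmamap_load_mbuf() will
		 * split any larger DMA segment accordingly.
		 */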
   6526 		dmamap->dm_maxsegsz =
   6527 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6528 		    ? m0->m_pkthdr.segsz << 2
   6529 		    : WTX_MAX_LEN;
   6530 
   6531 		/*
   6532 		 * Load the DMA map.  If this fails, the packet either
   6533 		 * didn't fit in the allotted number of segments, or we
   6534 		 * were short on resources.  For the too-many-segments
   6535 		 * case, we simply report an error and drop the packet,
   6536 		 * since we can't sanely copy a jumbo packet to a single
   6537 		 * buffer.
   6538 		 */
   6539 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6540 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6541 		if (error) {
   6542 			if (error == EFBIG) {
   6543 				WM_Q_EVCNT_INCR(txq, txdrop);
   6544 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6545 				    "DMA segments, dropping...\n",
   6546 				    device_xname(sc->sc_dev));
   6547 				wm_dump_mbuf_chain(sc, m0);
   6548 				m_freem(m0);
   6549 				continue;
   6550 			}
    6551 			/* Short on resources, just stop for now. */
   6552 			DPRINTF(WM_DEBUG_TX,
   6553 			    ("%s: TX: dmamap load failed: %d\n",
   6554 			    device_xname(sc->sc_dev), error));
   6555 			break;
   6556 		}
   6557 
   6558 		segs_needed = dmamap->dm_nsegs;
   6559 		if (use_tso) {
   6560 			/* For sentinel descriptor; see below. */
   6561 			segs_needed++;
   6562 		}
   6563 
   6564 		/*
   6565 		 * Ensure we have enough descriptors free to describe
   6566 		 * the packet.  Note, we always reserve one descriptor
   6567 		 * at the end of the ring due to the semantics of the
   6568 		 * TDT register, plus one more in the event we need
   6569 		 * to load offload context.
   6570 		 */
   6571 		if (segs_needed > txq->txq_free - 2) {
   6572 			/*
   6573 			 * Not enough free descriptors to transmit this
   6574 			 * packet.  We haven't committed anything yet,
   6575 			 * so just unload the DMA map, put the packet
    6576 			 * back on the queue, and punt.  Notify the upper
   6577 			 * layer that there are no more slots left.
   6578 			 */
   6579 			DPRINTF(WM_DEBUG_TX,
   6580 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6581 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6582 			    segs_needed, txq->txq_free - 1));
   6583 			ifp->if_flags |= IFF_OACTIVE;
   6584 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6585 			WM_Q_EVCNT_INCR(txq, txdstall);
   6586 			break;
   6587 		}
   6588 
   6589 		/*
   6590 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6591 		 * once we know we can transmit the packet, since we
   6592 		 * do some internal FIFO space accounting here.
   6593 		 */
   6594 		if (sc->sc_type == WM_T_82547 &&
   6595 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6596 			DPRINTF(WM_DEBUG_TX,
   6597 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6598 			    device_xname(sc->sc_dev)));
   6599 			ifp->if_flags |= IFF_OACTIVE;
   6600 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6601 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6602 			break;
   6603 		}
   6604 
   6605 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6606 
   6607 		DPRINTF(WM_DEBUG_TX,
   6608 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6609 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6610 
   6611 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6612 
   6613 		/*
   6614 		 * Store a pointer to the packet so that we can free it
   6615 		 * later.
   6616 		 *
   6617 		 * Initially, we consider the number of descriptors the
    6618 		 * packet uses to be the number of DMA segments.  This may be
   6619 		 * incremented by 1 if we do checksum offload (a descriptor
   6620 		 * is used to set the checksum context).
   6621 		 */
   6622 		txs->txs_mbuf = m0;
   6623 		txs->txs_firstdesc = txq->txq_next;
   6624 		txs->txs_ndesc = segs_needed;
   6625 
   6626 		/* Set up offload parameters for this packet. */
   6627 		if (m0->m_pkthdr.csum_flags &
   6628 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6629 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6630 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6631 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6632 					  &cksumfields) != 0) {
   6633 				/* Error message already displayed. */
   6634 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6635 				continue;
   6636 			}
   6637 		} else {
   6638 			cksumcmd = 0;
   6639 			cksumfields = 0;
   6640 		}
   6641 
   6642 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6643 
   6644 		/* Sync the DMA map. */
   6645 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6646 		    BUS_DMASYNC_PREWRITE);
   6647 
   6648 		/* Initialize the transmit descriptor. */
   6649 		for (nexttx = txq->txq_next, seg = 0;
   6650 		     seg < dmamap->dm_nsegs; seg++) {
   6651 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6652 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6653 			     seglen != 0;
   6654 			     curaddr += curlen, seglen -= curlen,
   6655 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6656 				curlen = seglen;
   6657 
   6658 				/*
   6659 				 * So says the Linux driver:
   6660 				 * Work around for premature descriptor
   6661 				 * write-backs in TSO mode.  Append a
   6662 				 * 4-byte sentinel descriptor.
   6663 				 */
   6664 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6665 				    curlen > 8)
   6666 					curlen -= 4;
   6667 
   6668 				wm_set_dma_addr(
   6669 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6670 				txq->txq_descs[nexttx].wtx_cmdlen
   6671 				    = htole32(cksumcmd | curlen);
   6672 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6673 				    = 0;
   6674 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6675 				    = cksumfields;
    6676 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6677 				lasttx = nexttx;
   6678 
   6679 				DPRINTF(WM_DEBUG_TX,
   6680 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6681 				     "len %#04zx\n",
   6682 				    device_xname(sc->sc_dev), nexttx,
   6683 				    (uint64_t)curaddr, curlen));
   6684 			}
   6685 		}
   6686 
   6687 		KASSERT(lasttx != -1);
   6688 
   6689 		/*
   6690 		 * Set up the command byte on the last descriptor of
   6691 		 * the packet.  If we're in the interrupt delay window,
   6692 		 * delay the interrupt.
   6693 		 */
   6694 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6695 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6696 
   6697 		/*
   6698 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6699 		 * up the descriptor to encapsulate the packet for us.
   6700 		 *
   6701 		 * This is only valid on the last descriptor of the packet.
   6702 		 */
   6703 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6704 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6705 			    htole32(WTX_CMD_VLE);
   6706 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6707 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6708 		}
   6709 
   6710 		txs->txs_lastdesc = lasttx;
   6711 
   6712 		DPRINTF(WM_DEBUG_TX,
   6713 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6714 		    device_xname(sc->sc_dev),
   6715 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6716 
   6717 		/* Sync the descriptors we're using. */
   6718 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6719 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6720 
   6721 		/* Give the packet to the chip. */
   6722 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6723 
   6724 		DPRINTF(WM_DEBUG_TX,
   6725 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6726 
   6727 		DPRINTF(WM_DEBUG_TX,
   6728 		    ("%s: TX: finished transmitting packet, job %d\n",
   6729 		    device_xname(sc->sc_dev), txq->txq_snext));
   6730 
   6731 		/* Advance the tx pointer. */
   6732 		txq->txq_free -= txs->txs_ndesc;
   6733 		txq->txq_next = nexttx;
   6734 
   6735 		txq->txq_sfree--;
   6736 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6737 
   6738 		/* Pass the packet to any BPF listeners. */
   6739 		bpf_mtap(ifp, m0);
   6740 	}
   6741 
   6742 	if (m0 != NULL) {
   6743 		ifp->if_flags |= IFF_OACTIVE;
   6744 		WM_Q_EVCNT_INCR(txq, txdrop);
   6745 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6746 			__func__));
   6747 		m_freem(m0);
   6748 	}
   6749 
   6750 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6751 		/* No more slots; notify upper layer. */
   6752 		ifp->if_flags |= IFF_OACTIVE;
   6753 	}
   6754 
   6755 	if (txq->txq_free != ofree) {
   6756 		/* Set a watchdog timer in case the chip flakes out. */
   6757 		ifp->if_timer = 5;
   6758 	}
   6759 }
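
/*
 * Illustrative sketch (not compiled) of the free-descriptor rule used
 * above: one slot is always left unused so a full ring and an empty ring
 * are distinguishable via TDH/TDT, and one more slot is reserved for a
 * possible checksum context descriptor.  The helper name is hypothetical.
 */
#if 0
static inline bool
wm_txq_has_room(struct wm_txqueue *txq, int segs_needed)
{

	/* segs_needed data descriptors + 1 ring sentinel + 1 context */
	return segs_needed <= txq->txq_free - 2;
}
#endif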
   6760 
   6761 /*
   6762  * wm_nq_tx_offload:
   6763  *
   6764  *	Set up TCP/IP checksumming parameters for the
   6765  *	specified packet, for NEWQUEUE devices
   6766  */
   6767 static int
   6768 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6769     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6770 {
   6771 	struct mbuf *m0 = txs->txs_mbuf;
   6772 	struct m_tag *mtag;
   6773 	uint32_t vl_len, mssidx, cmdc;
   6774 	struct ether_header *eh;
   6775 	int offset, iphl;
   6776 
   6777 	/*
   6778 	 * XXX It would be nice if the mbuf pkthdr had offset
   6779 	 * fields for the protocol headers.
   6780 	 */
   6781 	*cmdlenp = 0;
   6782 	*fieldsp = 0;
   6783 
   6784 	eh = mtod(m0, struct ether_header *);
   6785 	switch (htons(eh->ether_type)) {
   6786 	case ETHERTYPE_IP:
   6787 	case ETHERTYPE_IPV6:
   6788 		offset = ETHER_HDR_LEN;
   6789 		break;
   6790 
   6791 	case ETHERTYPE_VLAN:
   6792 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6793 		break;
   6794 
   6795 	default:
   6796 		/* Don't support this protocol or encapsulation. */
   6797 		*do_csum = false;
   6798 		return 0;
   6799 	}
   6800 	*do_csum = true;
   6801 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6802 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6803 
   6804 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6805 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6806 
   6807 	if ((m0->m_pkthdr.csum_flags &
   6808 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6809 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6810 	} else {
   6811 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6812 	}
   6813 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6814 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6815 
   6816 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6817 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6818 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6819 		*cmdlenp |= NQTX_CMD_VLE;
   6820 	}
   6821 
   6822 	mssidx = 0;
   6823 
   6824 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6825 		int hlen = offset + iphl;
   6826 		int tcp_hlen;
   6827 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6828 
   6829 		if (__predict_false(m0->m_len <
   6830 				    (hlen + sizeof(struct tcphdr)))) {
   6831 			/*
   6832 			 * TCP/IP headers are not in the first mbuf; we need
   6833 			 * to do this the slow and painful way.  Let's just
   6834 			 * hope this doesn't happen very often.
   6835 			 */
   6836 			struct tcphdr th;
   6837 
   6838 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6839 
   6840 			m_copydata(m0, hlen, sizeof(th), &th);
   6841 			if (v4) {
   6842 				struct ip ip;
   6843 
   6844 				m_copydata(m0, offset, sizeof(ip), &ip);
   6845 				ip.ip_len = 0;
   6846 				m_copyback(m0,
   6847 				    offset + offsetof(struct ip, ip_len),
   6848 				    sizeof(ip.ip_len), &ip.ip_len);
   6849 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6850 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6851 			} else {
   6852 				struct ip6_hdr ip6;
   6853 
   6854 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6855 				ip6.ip6_plen = 0;
   6856 				m_copyback(m0,
   6857 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6858 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6859 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6860 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6861 			}
   6862 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6863 			    sizeof(th.th_sum), &th.th_sum);
   6864 
   6865 			tcp_hlen = th.th_off << 2;
   6866 		} else {
   6867 			/*
   6868 			 * TCP/IP headers are in the first mbuf; we can do
   6869 			 * this the easy way.
   6870 			 */
   6871 			struct tcphdr *th;
   6872 
   6873 			if (v4) {
   6874 				struct ip *ip =
   6875 				    (void *)(mtod(m0, char *) + offset);
   6876 				th = (void *)(mtod(m0, char *) + hlen);
   6877 
   6878 				ip->ip_len = 0;
   6879 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6880 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6881 			} else {
   6882 				struct ip6_hdr *ip6 =
   6883 				    (void *)(mtod(m0, char *) + offset);
   6884 				th = (void *)(mtod(m0, char *) + hlen);
   6885 
   6886 				ip6->ip6_plen = 0;
   6887 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6888 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6889 			}
   6890 			tcp_hlen = th->th_off << 2;
   6891 		}
   6892 		hlen += tcp_hlen;
   6893 		*cmdlenp |= NQTX_CMD_TSE;
   6894 
   6895 		if (v4) {
   6896 			WM_Q_EVCNT_INCR(txq, txtso);
   6897 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6898 		} else {
   6899 			WM_Q_EVCNT_INCR(txq, txtso6);
   6900 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6901 		}
   6902 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6903 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6904 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6905 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6906 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6907 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6908 	} else {
   6909 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6910 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6911 	}
   6912 
   6913 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6914 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6915 		cmdc |= NQTXC_CMD_IP4;
   6916 	}
   6917 
   6918 	if (m0->m_pkthdr.csum_flags &
   6919 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6920 		WM_Q_EVCNT_INCR(txq, txtusum);
   6921 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6922 			cmdc |= NQTXC_CMD_TCP;
   6923 		} else {
   6924 			cmdc |= NQTXC_CMD_UDP;
   6925 		}
   6926 		cmdc |= NQTXC_CMD_IP4;
   6927 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6928 	}
   6929 	if (m0->m_pkthdr.csum_flags &
   6930 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6931 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6932 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6933 			cmdc |= NQTXC_CMD_TCP;
   6934 		} else {
   6935 			cmdc |= NQTXC_CMD_UDP;
   6936 		}
   6937 		cmdc |= NQTXC_CMD_IP6;
   6938 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6939 	}
   6940 
   6941 	/* Fill in the context descriptor. */
    6942 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
    6943 	    htole32(vl_len);
    6944 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
    6945 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
    6946 	    htole32(cmdc);
    6947 	txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
    6948 	    htole32(mssidx);
   6949 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6950 	DPRINTF(WM_DEBUG_TX,
   6951 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6952 	    txq->txq_next, 0, vl_len));
   6953 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6954 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6955 	txs->txs_ndesc++;
   6956 	return 0;
   6957 }
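
/*
 * Worked example (illustrative, untagged TSOv4, 20-byte IP header,
 * 20-byte TCP header, MSS 1448): the context descriptor above carries
 * MACLEN = 14 and IPLEN = 20 in vl_len, and MSS = 1448 and L4LEN = 20 in
 * mssidx; PAYLEN in *fieldsp is the packet length minus the 54 header
 * bytes.
 */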
   6958 
   6959 /*
   6960  * wm_nq_start:		[ifnet interface function]
   6961  *
   6962  *	Start packet transmission on the interface for NEWQUEUE devices
   6963  */
   6964 static void
   6965 wm_nq_start(struct ifnet *ifp)
   6966 {
   6967 	struct wm_softc *sc = ifp->if_softc;
   6968 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6969 
   6970 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6971 
   6972 	/*
   6973 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6974 	 */
   6975 
   6976 	mutex_enter(txq->txq_lock);
   6977 	if (!txq->txq_stopping)
   6978 		wm_nq_start_locked(ifp);
   6979 	mutex_exit(txq->txq_lock);
   6980 }
   6981 
   6982 static void
   6983 wm_nq_start_locked(struct ifnet *ifp)
   6984 {
   6985 	struct wm_softc *sc = ifp->if_softc;
   6986 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6987 
   6988 	wm_nq_send_common_locked(ifp, txq, false);
   6989 }
   6990 
   6991 static int
   6992 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6993 {
   6994 	int qid;
   6995 	struct wm_softc *sc = ifp->if_softc;
   6996 	struct wm_txqueue *txq;
   6997 
   6998 	qid = wm_select_txqueue(ifp, m);
   6999 	txq = &sc->sc_queue[qid].wmq_txq;
   7000 
   7001 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7002 		m_freem(m);
   7003 		WM_Q_EVCNT_INCR(txq, txdrop);
   7004 		return ENOBUFS;
   7005 	}
   7006 
   7007 	/*
   7008 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7009 	 */
   7010 	ifp->if_obytes += m->m_pkthdr.len;
   7011 	if (m->m_flags & M_MCAST)
   7012 		ifp->if_omcasts++;
   7013 
   7014 	if (mutex_tryenter(txq->txq_lock)) {
   7015 		if (!txq->txq_stopping)
   7016 			wm_nq_transmit_locked(ifp, txq);
   7017 		mutex_exit(txq->txq_lock);
   7018 	}
   7019 
   7020 	return 0;
   7021 }
   7022 
   7023 static void
   7024 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7025 {
   7026 
   7027 	wm_nq_send_common_locked(ifp, txq, true);
   7028 }
   7029 
   7030 static void
   7031 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7032     bool is_transmit)
   7033 {
   7034 	struct wm_softc *sc = ifp->if_softc;
   7035 	struct mbuf *m0;
   7036 	struct m_tag *mtag;
   7037 	struct wm_txsoft *txs;
   7038 	bus_dmamap_t dmamap;
   7039 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7040 	bool do_csum, sent;
   7041 
   7042 	KASSERT(mutex_owned(txq->txq_lock));
   7043 
   7044 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   7045 		return;
   7046 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7047 		return;
   7048 
   7049 	sent = false;
   7050 
   7051 	/*
   7052 	 * Loop through the send queue, setting up transmit descriptors
   7053 	 * until we drain the queue, or use up all available transmit
   7054 	 * descriptors.
   7055 	 */
   7056 	for (;;) {
   7057 		m0 = NULL;
   7058 
   7059 		/* Get a work queue entry. */
   7060 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7061 			wm_txeof(sc, txq);
   7062 			if (txq->txq_sfree == 0) {
   7063 				DPRINTF(WM_DEBUG_TX,
   7064 				    ("%s: TX: no free job descriptors\n",
   7065 					device_xname(sc->sc_dev)));
   7066 				WM_Q_EVCNT_INCR(txq, txsstall);
   7067 				break;
   7068 			}
   7069 		}
   7070 
   7071 		/* Grab a packet off the queue. */
   7072 		if (is_transmit)
   7073 			m0 = pcq_get(txq->txq_interq);
   7074 		else
   7075 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7076 		if (m0 == NULL)
   7077 			break;
   7078 
   7079 		DPRINTF(WM_DEBUG_TX,
   7080 		    ("%s: TX: have packet to transmit: %p\n",
   7081 		    device_xname(sc->sc_dev), m0));
   7082 
   7083 		txs = &txq->txq_soft[txq->txq_snext];
   7084 		dmamap = txs->txs_dmamap;
   7085 
   7086 		/*
   7087 		 * Load the DMA map.  If this fails, the packet either
   7088 		 * didn't fit in the allotted number of segments, or we
   7089 		 * were short on resources.  For the too-many-segments
   7090 		 * case, we simply report an error and drop the packet,
   7091 		 * since we can't sanely copy a jumbo packet to a single
   7092 		 * buffer.
   7093 		 */
   7094 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7095 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7096 		if (error) {
   7097 			if (error == EFBIG) {
   7098 				WM_Q_EVCNT_INCR(txq, txdrop);
   7099 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7100 				    "DMA segments, dropping...\n",
   7101 				    device_xname(sc->sc_dev));
   7102 				wm_dump_mbuf_chain(sc, m0);
   7103 				m_freem(m0);
   7104 				continue;
   7105 			}
   7106 			/* Short on resources, just stop for now. */
   7107 			DPRINTF(WM_DEBUG_TX,
   7108 			    ("%s: TX: dmamap load failed: %d\n",
   7109 			    device_xname(sc->sc_dev), error));
   7110 			break;
   7111 		}
   7112 
   7113 		segs_needed = dmamap->dm_nsegs;
   7114 
   7115 		/*
   7116 		 * Ensure we have enough descriptors free to describe
   7117 		 * the packet.  Note, we always reserve one descriptor
   7118 		 * at the end of the ring due to the semantics of the
   7119 		 * TDT register, plus one more in the event we need
   7120 		 * to load offload context.
   7121 		 */
   7122 		if (segs_needed > txq->txq_free - 2) {
   7123 			/*
   7124 			 * Not enough free descriptors to transmit this
   7125 			 * packet.  We haven't committed anything yet,
   7126 			 * so just unload the DMA map, put the packet
    7127 			 * back on the queue, and punt.  Notify the upper
   7128 			 * layer that there are no more slots left.
   7129 			 */
   7130 			DPRINTF(WM_DEBUG_TX,
   7131 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7132 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7133 			    segs_needed, txq->txq_free - 1));
   7134 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7135 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7136 			WM_Q_EVCNT_INCR(txq, txdstall);
   7137 			break;
   7138 		}
   7139 
   7140 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7141 
   7142 		DPRINTF(WM_DEBUG_TX,
   7143 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7144 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7145 
   7146 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7147 
   7148 		/*
   7149 		 * Store a pointer to the packet so that we can free it
   7150 		 * later.
   7151 		 *
   7152 		 * Initially, we consider the number of descriptors the
    7153 		 * packet uses to be the number of DMA segments.  This may be
   7154 		 * incremented by 1 if we do checksum offload (a descriptor
   7155 		 * is used to set the checksum context).
   7156 		 */
   7157 		txs->txs_mbuf = m0;
   7158 		txs->txs_firstdesc = txq->txq_next;
   7159 		txs->txs_ndesc = segs_needed;
   7160 
   7161 		/* Set up offload parameters for this packet. */
   7162 		uint32_t cmdlen, fields, dcmdlen;
   7163 		if (m0->m_pkthdr.csum_flags &
   7164 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7165 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7166 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7167 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7168 			    &do_csum) != 0) {
   7169 				/* Error message already displayed. */
   7170 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7171 				continue;
   7172 			}
   7173 		} else {
   7174 			do_csum = false;
   7175 			cmdlen = 0;
   7176 			fields = 0;
   7177 		}
   7178 
   7179 		/* Sync the DMA map. */
   7180 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7181 		    BUS_DMASYNC_PREWRITE);
   7182 
   7183 		/* Initialize the first transmit descriptor. */
   7184 		nexttx = txq->txq_next;
   7185 		if (!do_csum) {
    7186 			/* Set up a legacy descriptor. */
   7187 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7188 			    dmamap->dm_segs[0].ds_addr);
   7189 			txq->txq_descs[nexttx].wtx_cmdlen =
   7190 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7191 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7192 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7193 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7194 			    NULL) {
   7195 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7196 				    htole32(WTX_CMD_VLE);
   7197 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7198 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7199 			} else {
    7200 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7201 			}
   7202 			dcmdlen = 0;
   7203 		} else {
    7204 			/* Set up an advanced data descriptor. */
   7205 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7206 			    htole64(dmamap->dm_segs[0].ds_addr);
   7207 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7208 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7209 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7210 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7211 			    htole32(fields);
   7212 			DPRINTF(WM_DEBUG_TX,
   7213 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7214 			    device_xname(sc->sc_dev), nexttx,
   7215 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7216 			DPRINTF(WM_DEBUG_TX,
   7217 			    ("\t 0x%08x%08x\n", fields,
   7218 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7219 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7220 		}
   7221 
   7222 		lasttx = nexttx;
   7223 		nexttx = WM_NEXTTX(txq, nexttx);
   7224 		/*
    7225 		 * Fill in the next descriptors.  Legacy and advanced
    7226 		 * formats are the same from here on.
   7227 		 */
   7228 		for (seg = 1; seg < dmamap->dm_nsegs;
   7229 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7230 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7231 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7232 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7233 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7234 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7235 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7236 			lasttx = nexttx;
   7237 
   7238 			DPRINTF(WM_DEBUG_TX,
   7239 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7240 			     "len %#04zx\n",
   7241 			    device_xname(sc->sc_dev), nexttx,
   7242 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7243 			    dmamap->dm_segs[seg].ds_len));
   7244 		}
   7245 
   7246 		KASSERT(lasttx != -1);
   7247 
   7248 		/*
   7249 		 * Set up the command byte on the last descriptor of
   7250 		 * the packet.  If we're in the interrupt delay window,
   7251 		 * delay the interrupt.
   7252 		 */
   7253 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7254 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7255 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7256 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7257 
   7258 		txs->txs_lastdesc = lasttx;
   7259 
   7260 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7261 		    device_xname(sc->sc_dev),
   7262 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7263 
   7264 		/* Sync the descriptors we're using. */
   7265 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7266 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7267 
   7268 		/* Give the packet to the chip. */
   7269 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7270 		sent = true;
   7271 
   7272 		DPRINTF(WM_DEBUG_TX,
   7273 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7274 
   7275 		DPRINTF(WM_DEBUG_TX,
   7276 		    ("%s: TX: finished transmitting packet, job %d\n",
   7277 		    device_xname(sc->sc_dev), txq->txq_snext));
   7278 
   7279 		/* Advance the tx pointer. */
   7280 		txq->txq_free -= txs->txs_ndesc;
   7281 		txq->txq_next = nexttx;
   7282 
   7283 		txq->txq_sfree--;
   7284 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7285 
   7286 		/* Pass the packet to any BPF listeners. */
   7287 		bpf_mtap(ifp, m0);
   7288 	}
   7289 
   7290 	if (m0 != NULL) {
   7291 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7292 		WM_Q_EVCNT_INCR(txq, txdrop);
   7293 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7294 			__func__));
   7295 		m_freem(m0);
   7296 	}
   7297 
   7298 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7299 		/* No more slots; notify upper layer. */
   7300 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7301 	}
   7302 
   7303 	if (sent) {
   7304 		/* Set a watchdog timer in case the chip flakes out. */
   7305 		ifp->if_timer = 5;
   7306 	}
   7307 }
   7308 
   7309 static void
   7310 wm_deferred_start(struct ifnet *ifp)
   7311 {
   7312 	struct wm_softc *sc = ifp->if_softc;
   7313 	int qid = 0;
   7314 
   7315 	/*
    7316 	 * Try to transmit on all Tx queues.  It might be better to pass
    7317 	 * a specific txq somehow and transmit only on that queue.
   7318 	 */
   7319 restart:
   7320 	WM_CORE_LOCK(sc);
   7321 	if (sc->sc_core_stopping)
   7322 		goto out;
   7323 
   7324 	for (; qid < sc->sc_nqueues; qid++) {
   7325 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   7326 
   7327 		if (!mutex_tryenter(txq->txq_lock))
   7328 			continue;
   7329 
   7330 		if (txq->txq_stopping) {
   7331 			mutex_exit(txq->txq_lock);
   7332 			continue;
   7333 		}
   7334 		WM_CORE_UNLOCK(sc);
   7335 
   7336 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7337 			/* XXX need for ALTQ */
   7338 			if (qid == 0)
   7339 				wm_nq_start_locked(ifp);
   7340 			wm_nq_transmit_locked(ifp, txq);
   7341 		} else {
   7342 			/* XXX need for ALTQ */
   7343 			if (qid == 0)
   7344 				wm_start_locked(ifp);
   7345 			wm_transmit_locked(ifp, txq);
   7346 		}
   7347 		mutex_exit(txq->txq_lock);
   7348 
   7349 		qid++;
   7350 		goto restart;
   7351 	}
   7352 out:
   7353 	WM_CORE_UNLOCK(sc);
   7354 }
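
/*
 * Note on the restart pattern above: the core lock cannot be held while
 * transmitting, so it is dropped once a txq_lock has been acquired, and
 * the goto re-takes it and re-checks sc_core_stopping before moving on
 * to the next queue.
 */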
   7355 
   7356 /* Interrupt */
   7357 
   7358 /*
   7359  * wm_txeof:
   7360  *
   7361  *	Helper; handle transmit interrupts.
   7362  */
   7363 static int
   7364 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7365 {
   7366 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7367 	struct wm_txsoft *txs;
   7368 	bool processed = false;
   7369 	int count = 0;
   7370 	int i;
   7371 	uint8_t status;
   7372 
   7373 	KASSERT(mutex_owned(txq->txq_lock));
   7374 
   7375 	if (txq->txq_stopping)
   7376 		return 0;
   7377 
   7378 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7379 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7380 	else
   7381 		ifp->if_flags &= ~IFF_OACTIVE;
   7382 
   7383 	/*
   7384 	 * Go through the Tx list and free mbufs for those
   7385 	 * frames which have been transmitted.
   7386 	 */
   7387 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7388 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7389 		txs = &txq->txq_soft[i];
   7390 
   7391 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7392 			device_xname(sc->sc_dev), i));
   7393 
   7394 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7395 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7396 
   7397 		status =
   7398 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7399 		if ((status & WTX_ST_DD) == 0) {
   7400 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7401 			    BUS_DMASYNC_PREREAD);
   7402 			break;
   7403 		}
   7404 
   7405 		processed = true;
   7406 		count++;
   7407 		DPRINTF(WM_DEBUG_TX,
   7408 		    ("%s: TX: job %d done: descs %d..%d\n",
   7409 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7410 		    txs->txs_lastdesc));
   7411 
   7412 		/*
   7413 		 * XXX We should probably be using the statistics
   7414 		 * XXX registers, but I don't know if they exist
   7415 		 * XXX on chips before the i82544.
   7416 		 */
   7417 
   7418 #ifdef WM_EVENT_COUNTERS
   7419 		if (status & WTX_ST_TU)
   7420 			WM_Q_EVCNT_INCR(txq, tu);
   7421 #endif /* WM_EVENT_COUNTERS */
   7422 
   7423 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7424 			ifp->if_oerrors++;
   7425 			if (status & WTX_ST_LC)
   7426 				log(LOG_WARNING, "%s: late collision\n",
   7427 				    device_xname(sc->sc_dev));
   7428 			else if (status & WTX_ST_EC) {
   7429 				ifp->if_collisions += 16;
   7430 				log(LOG_WARNING, "%s: excessive collisions\n",
   7431 				    device_xname(sc->sc_dev));
   7432 			}
   7433 		} else
   7434 			ifp->if_opackets++;
   7435 
   7436 		txq->txq_free += txs->txs_ndesc;
   7437 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7438 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7439 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7440 		m_freem(txs->txs_mbuf);
   7441 		txs->txs_mbuf = NULL;
   7442 	}
   7443 
   7444 	/* Update the dirty transmit buffer pointer. */
   7445 	txq->txq_sdirty = i;
   7446 	DPRINTF(WM_DEBUG_TX,
   7447 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7448 
   7449 	if (count != 0)
   7450 		rnd_add_uint32(&sc->rnd_source, count);
   7451 
   7452 	/*
   7453 	 * If there are no more pending transmissions, cancel the watchdog
   7454 	 * timer.
   7455 	 */
   7456 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7457 		ifp->if_timer = 0;
   7458 
   7459 	return processed;
   7460 }
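
/*
 * Worked example (illustrative): if jobs 3..5 are pending and the last
 * descriptors of jobs 3 and 4 have WTX_ST_DD set but job 5's does not,
 * the loop above frees the mbufs of jobs 3 and 4, advances txq_sdirty
 * to 5 and leaves job 5 for a later interrupt.
 */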
   7461 
   7462 static inline uint32_t
   7463 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7464 {
   7465 	struct wm_softc *sc = rxq->rxq_sc;
   7466 
   7467 	if (sc->sc_type == WM_T_82574)
   7468 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7469 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7470 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7471 	else
   7472 		return rxq->rxq_descs[idx].wrx_status;
   7473 }
   7474 
   7475 static inline uint32_t
   7476 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7477 {
   7478 	struct wm_softc *sc = rxq->rxq_sc;
   7479 
   7480 	if (sc->sc_type == WM_T_82574)
   7481 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7482 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7483 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7484 	else
   7485 		return rxq->rxq_descs[idx].wrx_errors;
   7486 }
   7487 
   7488 static inline uint16_t
   7489 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7490 {
   7491 	struct wm_softc *sc = rxq->rxq_sc;
   7492 
   7493 	if (sc->sc_type == WM_T_82574)
   7494 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7495 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7496 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7497 	else
   7498 		return rxq->rxq_descs[idx].wrx_special;
   7499 }
   7500 
   7501 static inline int
   7502 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7503 {
   7504 	struct wm_softc *sc = rxq->rxq_sc;
   7505 
   7506 	if (sc->sc_type == WM_T_82574)
   7507 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7508 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7509 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7510 	else
   7511 		return rxq->rxq_descs[idx].wrx_len;
   7512 }
   7513 
   7514 #ifdef WM_DEBUG
   7515 static inline uint32_t
   7516 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7517 {
   7518 	struct wm_softc *sc = rxq->rxq_sc;
   7519 
   7520 	if (sc->sc_type == WM_T_82574)
   7521 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7522 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7523 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7524 	else
   7525 		return 0;
   7526 }
   7527 
   7528 static inline uint8_t
   7529 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7530 {
   7531 	struct wm_softc *sc = rxq->rxq_sc;
   7532 
   7533 	if (sc->sc_type == WM_T_82574)
   7534 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7535 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7536 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7537 	else
   7538 		return 0;
   7539 }
   7540 #endif /* WM_DEBUG */
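
/*
 * Summary of the three Rx descriptor layouts handled by the accessors
 * above:
 *
 *	legacy (wrx_*):		separate status/error/length/special
 *				fields, no RSS information.
 *	82574 extended (erxc_*): status and errors packed together in
 *				erxc_err_stat, RSS hash and type present.
 *	newqueue (nrxc_*):	same idea as the 82574 layout, with its
 *				own NQRXC_* bit definitions.
 */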
   7541 
   7542 static inline bool
   7543 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7544     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7545 {
   7546 
   7547 	if (sc->sc_type == WM_T_82574)
   7548 		return (status & ext_bit) != 0;
   7549 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7550 		return (status & nq_bit) != 0;
   7551 	else
   7552 		return (status & legacy_bit) != 0;
   7553 }
   7554 
   7555 static inline bool
   7556 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7557     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7558 {
   7559 
   7560 	if (sc->sc_type == WM_T_82574)
   7561 		return (error & ext_bit) != 0;
   7562 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7563 		return (error & nq_bit) != 0;
   7564 	else
   7565 		return (error & legacy_bit) != 0;
   7566 }
   7567 
   7568 static inline bool
   7569 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7570 {
   7571 
   7572 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7573 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7574 		return true;
   7575 	else
   7576 		return false;
   7577 }
   7578 
   7579 static inline bool
   7580 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7581 {
   7582 	struct wm_softc *sc = rxq->rxq_sc;
   7583 
   7584 	/* XXXX missing error bit for newqueue? */
   7585 	if (wm_rxdesc_is_set_error(sc, errors,
   7586 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   7587 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   7588 		NQRXC_ERROR_RXE)) {
   7589 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7590 			log(LOG_WARNING, "%s: symbol error\n",
   7591 			    device_xname(sc->sc_dev));
   7592 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7593 			log(LOG_WARNING, "%s: receive sequence error\n",
   7594 			    device_xname(sc->sc_dev));
   7595 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   7596 			log(LOG_WARNING, "%s: CRC error\n",
   7597 			    device_xname(sc->sc_dev));
   7598 		return true;
   7599 	}
   7600 
   7601 	return false;
   7602 }
   7603 
   7604 static inline bool
   7605 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7606 {
   7607 	struct wm_softc *sc = rxq->rxq_sc;
   7608 
   7609 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7610 		NQRXC_STATUS_DD)) {
   7611 		/* We have processed all of the receive descriptors. */
   7612 		struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   7613 
   7614 		if (sc->sc_type == WM_T_82574) {
   7615 			rxq->rxq_ext_descs[idx].erx_data.erxd_addr =
   7616 				htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr
   7617 				    + sc->sc_align_tweak);
   7618 		} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7619 			rxq->rxq_nq_descs[idx].nqrx_data.nrxd_paddr =
   7620 				htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr
   7621 				    + sc->sc_align_tweak);
   7622 		}
   7623 
   7624 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7625 		return false;
   7626 	}
   7627 
   7628 	return true;
   7629 }
   7630 
   7631 static inline bool
   7632 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   7633     struct mbuf *m)
   7634 {
   7635 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7636 
   7637 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7638 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7639 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7640 	}
   7641 
   7642 	return true;
   7643 }
   7644 
   7645 static inline void
   7646 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7647     uint32_t errors, struct mbuf *m)
   7648 {
   7649 	struct wm_softc *sc = rxq->rxq_sc;
   7650 
   7651 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7652 		if (wm_rxdesc_is_set_status(sc, status,
   7653 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7654 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7655 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7656 			if (wm_rxdesc_is_set_error(sc, errors,
   7657 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7658 				m->m_pkthdr.csum_flags |=
   7659 					M_CSUM_IPv4_BAD;
   7660 		}
   7661 		if (wm_rxdesc_is_set_status(sc, status,
   7662 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7663 			/*
   7664 			 * Note: we don't know if this was TCP or UDP,
   7665 			 * so we just set both bits, and expect the
   7666 			 * upper layers to deal.
   7667 			 */
   7668 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7669 			m->m_pkthdr.csum_flags |=
   7670 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7671 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7672 			if (wm_rxdesc_is_set_error(sc, errors,
   7673 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7674 				m->m_pkthdr.csum_flags |=
   7675 					M_CSUM_TCP_UDP_BAD;
   7676 		}
   7677 	}
   7678 }
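
/*
 * Illustrative sketch (not compiled): how an upper layer typically
 * consumes the flags set above.  A *_BAD bit means the hardware verified
 * the checksum and found it wrong, so the stack can drop the packet
 * without recomputing the checksum in software.
 */
#if 0
	if (m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		if (m->m_pkthdr.csum_flags & M_CSUM_TCP_UDP_BAD)
			goto badsum;	/* hardware says checksum is bad */
		/* otherwise already verified; no software checksum needed */
	}
#endif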
   7679 
   7680 /*
   7681  * wm_rxeof:
   7682  *
   7683  *	Helper; handle receive interrupts.
   7684  */
   7685 static void
   7686 wm_rxeof(struct wm_rxqueue *rxq)
   7687 {
   7688 	struct wm_softc *sc = rxq->rxq_sc;
   7689 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7690 	struct wm_rxsoft *rxs;
   7691 	struct mbuf *m;
   7692 	int i, len;
   7693 	int count = 0;
   7694 	uint32_t status, errors;
   7695 	uint16_t vlantag;
   7696 	uint32_t rsshash __debugused;
   7697 	uint8_t rsstype  __debugused;
   7698 
   7699 	KASSERT(mutex_owned(rxq->rxq_lock));
   7700 
   7701 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7702 		rxs = &rxq->rxq_soft[i];
   7703 
   7704 		DPRINTF(WM_DEBUG_RX,
   7705 		    ("%s: RX: checking descriptor %d\n",
   7706 		    device_xname(sc->sc_dev), i));
    7707 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7708 
   7709 		status = wm_rxdesc_get_status(rxq, i);
   7710 		errors = wm_rxdesc_get_errors(rxq, i);
   7711 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   7712 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   7713 #ifdef WM_DEBUG
   7714 		rsshash = wm_rxdesc_get_rsshash(rxq, i);
   7715 		rsstype = wm_rxdesc_get_rsstype(rxq, i);
   7716 #endif
   7717 
   7718 		if (!wm_rxdesc_dd(rxq, i, status))
   7719 			break;
   7720 
   7721 		count++;
   7722 		if (__predict_false(rxq->rxq_discard)) {
   7723 			DPRINTF(WM_DEBUG_RX,
   7724 			    ("%s: RX: discarding contents of descriptor %d\n",
   7725 			    device_xname(sc->sc_dev), i));
   7726 			wm_init_rxdesc(rxq, i);
   7727 			if (wm_rxdesc_is_eop(rxq, status)) {
   7728 				/* Reset our state. */
   7729 				DPRINTF(WM_DEBUG_RX,
   7730 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7731 				    device_xname(sc->sc_dev)));
   7732 				rxq->rxq_discard = 0;
   7733 			}
   7734 			continue;
   7735 		}
   7736 
   7737 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7738 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7739 
   7740 		m = rxs->rxs_mbuf;
   7741 
   7742 		/*
   7743 		 * Add a new receive buffer to the ring, unless of
   7744 		 * course the length is zero. Treat the latter as a
   7745 		 * failed mapping.
   7746 		 */
   7747 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7748 			/*
   7749 			 * Failed, throw away what we've done so
   7750 			 * far, and discard the rest of the packet.
   7751 			 */
   7752 			ifp->if_ierrors++;
   7753 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7754 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7755 			wm_init_rxdesc(rxq, i);
   7756 			if (!wm_rxdesc_is_eop(rxq, status))
   7757 				rxq->rxq_discard = 1;
   7758 			if (rxq->rxq_head != NULL)
   7759 				m_freem(rxq->rxq_head);
   7760 			WM_RXCHAIN_RESET(rxq);
   7761 			DPRINTF(WM_DEBUG_RX,
   7762 			    ("%s: RX: Rx buffer allocation failed, "
   7763 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7764 			    rxq->rxq_discard ? " (discard)" : ""));
   7765 			continue;
   7766 		}
   7767 
   7768 		m->m_len = len;
   7769 		rxq->rxq_len += len;
   7770 		DPRINTF(WM_DEBUG_RX,
   7771 		    ("%s: RX: buffer at %p len %d\n",
   7772 		    device_xname(sc->sc_dev), m->m_data, len));
   7773 
   7774 		/* If this is not the end of the packet, keep looking. */
   7775 		if (!wm_rxdesc_is_eop(rxq, status)) {
   7776 			WM_RXCHAIN_LINK(rxq, m);
   7777 			DPRINTF(WM_DEBUG_RX,
   7778 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7779 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7780 			continue;
   7781 		}
   7782 
   7783 		/*
   7784 		 * Okay, we have the entire packet now.  The chip is
    7785 		 * configured to include the FCS except on I350, I354 and
    7786 		 * I21[01] (not all chips can be configured to strip it),
    7787 		 * so we need to trim it.
    7788 		 * We may need to adjust the length of the previous mbuf
    7789 		 * in the chain if the current mbuf is too short.
    7790 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7791 		 * register is always set on I350, so we don't trim there.
   7792 		 */
   7793 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7794 		    && (sc->sc_type != WM_T_I210)
   7795 		    && (sc->sc_type != WM_T_I211)) {
   7796 			if (m->m_len < ETHER_CRC_LEN) {
   7797 				rxq->rxq_tail->m_len
   7798 				    -= (ETHER_CRC_LEN - m->m_len);
   7799 				m->m_len = 0;
   7800 			} else
   7801 				m->m_len -= ETHER_CRC_LEN;
   7802 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7803 		} else
   7804 			len = rxq->rxq_len;
   7805 
   7806 		WM_RXCHAIN_LINK(rxq, m);
   7807 
   7808 		*rxq->rxq_tailp = NULL;
   7809 		m = rxq->rxq_head;
   7810 
   7811 		WM_RXCHAIN_RESET(rxq);
   7812 
   7813 		DPRINTF(WM_DEBUG_RX,
   7814 		    ("%s: RX: have entire packet, len -> %d\n",
   7815 		    device_xname(sc->sc_dev), len));
   7816 
   7817 		/* If an error occurred, update stats and drop the packet. */
   7818 		if (wm_rxdesc_has_errors(rxq, errors)) {
   7819 			m_freem(m);
   7820 			continue;
   7821 		}
   7822 
   7823 		/* No errors.  Receive the packet. */
   7824 		m_set_rcvif(m, ifp);
   7825 		m->m_pkthdr.len = len;
    7826 		/*
    7827 		 * TODO:
    7828 		 * rsshash and rsstype should be saved in this mbuf.
    7829 		 */
    7830 		DPRINTF(WM_DEBUG_RX,
    7831 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
    7832 			device_xname(sc->sc_dev), rsstype, rsshash));
   7833 
   7834 		/*
   7835 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7836 		 * for us.  Associate the tag with the packet.
   7837 		 */
   7838 		/* XXXX should check for i350 and i354 */
   7839 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   7840 			continue;
   7841 
   7842 		/* Set up checksum info for this packet. */
   7843 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   7844 
   7845 		mutex_exit(rxq->rxq_lock);
   7846 
   7847 		/* Pass it on. */
   7848 		if_percpuq_enqueue(sc->sc_ipq, m);
   7849 
   7850 		mutex_enter(rxq->rxq_lock);
   7851 
   7852 		if (rxq->rxq_stopping)
   7853 			break;
   7854 	}
   7855 
   7856 	/* Update the receive pointer. */
   7857 	rxq->rxq_ptr = i;
   7858 	if (count != 0)
   7859 		rnd_add_uint32(&sc->rnd_source, count);
   7860 
   7861 	DPRINTF(WM_DEBUG_RX,
   7862 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7863 }
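
/*
 * Worked example (illustrative): with 2KB receive buffers, a 3000-byte
 * frame (FCS included) arrives as two descriptors: the first with
 * len 2048 and no EOP, the second with len 952 and EOP.  wm_rxeof()
 * links the two mbufs, trims the 4-byte FCS from the tail on chips that
 * do not strip it, and passes a 2996-byte packet up the stack.
 */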
   7864 
   7865 /*
   7866  * wm_linkintr_gmii:
   7867  *
   7868  *	Helper; handle link interrupts for GMII.
   7869  */
   7870 static void
   7871 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7872 {
   7873 
   7874 	KASSERT(WM_CORE_LOCKED(sc));
   7875 
   7876 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7877 		__func__));
   7878 
   7879 	if (icr & ICR_LSC) {
   7880 		uint32_t reg;
   7881 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7882 
   7883 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7884 			wm_gig_downshift_workaround_ich8lan(sc);
   7885 
   7886 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7887 			device_xname(sc->sc_dev)));
   7888 		mii_pollstat(&sc->sc_mii);
   7889 		if (sc->sc_type == WM_T_82543) {
   7890 			int miistatus, active;
   7891 
   7892 			/*
    7893 			 * With the 82543, we need to force the MAC's
    7894 			 * speed and duplex to match the PHY's current
    7895 			 * speed and duplex configuration.
   7896 			 */
   7897 			miistatus = sc->sc_mii.mii_media_status;
   7898 
   7899 			if (miistatus & IFM_ACTIVE) {
   7900 				active = sc->sc_mii.mii_media_active;
   7901 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7902 				switch (IFM_SUBTYPE(active)) {
   7903 				case IFM_10_T:
   7904 					sc->sc_ctrl |= CTRL_SPEED_10;
   7905 					break;
   7906 				case IFM_100_TX:
   7907 					sc->sc_ctrl |= CTRL_SPEED_100;
   7908 					break;
   7909 				case IFM_1000_T:
   7910 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7911 					break;
   7912 				default:
   7913 					/*
   7914 					 * fiber?
    7915 					 * Should not enter here.
   7916 					 */
   7917 					printf("unknown media (%x)\n", active);
   7918 					break;
   7919 				}
   7920 				if (active & IFM_FDX)
   7921 					sc->sc_ctrl |= CTRL_FD;
   7922 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7923 			}
   7924 		} else if ((sc->sc_type == WM_T_ICH8)
   7925 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7926 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7927 		} else if (sc->sc_type == WM_T_PCH) {
   7928 			wm_k1_gig_workaround_hv(sc,
   7929 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7930 		}
   7931 
   7932 		if ((sc->sc_phytype == WMPHY_82578)
   7933 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7934 			== IFM_1000_T)) {
   7935 
   7936 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7937 				delay(200*1000); /* XXX too big */
   7938 
   7939 				/* Link stall fix for link up */
   7940 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7941 				    HV_MUX_DATA_CTRL,
   7942 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7943 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7944 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7945 				    HV_MUX_DATA_CTRL,
   7946 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7947 			}
   7948 		}
   7949 		/*
   7950 		 * I217 Packet Loss issue:
   7951 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   7952 		 * on power up.
   7953 		 * Set the Beacon Duration for I217 to 8 usec
   7954 		 */
   7955 		if ((sc->sc_type == WM_T_PCH_LPT)
   7956 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7957 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   7958 			reg &= ~FEXTNVM4_BEACON_DURATION;
   7959 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   7960 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   7961 		}
   7962 
   7963 		/* XXX Work-around I218 hang issue */
   7964 		/* e1000_k1_workaround_lpt_lp() */
   7965 
   7966 		if ((sc->sc_type == WM_T_PCH_LPT)
   7967 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7968 			/*
   7969 			 * Set platform power management values for Latency
   7970 			 * Tolerance Reporting (LTR)
   7971 			 */
   7972 			wm_platform_pm_pch_lpt(sc,
   7973 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   7974 				    != 0));
   7975 		}
   7976 
   7977 		/* FEXTNVM6 K1-off workaround */
   7978 		if (sc->sc_type == WM_T_PCH_SPT) {
   7979 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   7980 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   7981 			    & FEXTNVM6_K1_OFF_ENABLE)
   7982 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   7983 			else
   7984 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   7985 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   7986 		}
   7987 	} else if (icr & ICR_RXSEQ) {
   7988 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7989 			device_xname(sc->sc_dev)));
   7990 	}
   7991 }
   7992 
   7993 /*
   7994  * wm_linkintr_tbi:
   7995  *
   7996  *	Helper; handle link interrupts for TBI mode.
   7997  */
   7998 static void
   7999 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8000 {
   8001 	uint32_t status;
   8002 
   8003 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8004 		__func__));
   8005 
   8006 	status = CSR_READ(sc, WMREG_STATUS);
   8007 	if (icr & ICR_LSC) {
   8008 		if (status & STATUS_LU) {
   8009 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8010 			    device_xname(sc->sc_dev),
   8011 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8012 			/*
   8013 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8014 			 * so we should update sc->sc_ctrl
   8015 			 */
   8016 
   8017 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8018 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8019 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8020 			if (status & STATUS_FD)
   8021 				sc->sc_tctl |=
   8022 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8023 			else
   8024 				sc->sc_tctl |=
   8025 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8026 			if (sc->sc_ctrl & CTRL_TFCE)
   8027 				sc->sc_fcrtl |= FCRTL_XONE;
   8028 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8029 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8030 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8031 				      sc->sc_fcrtl);
   8032 			sc->sc_tbi_linkup = 1;
   8033 		} else {
   8034 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8035 			    device_xname(sc->sc_dev)));
   8036 			sc->sc_tbi_linkup = 0;
   8037 		}
   8038 		/* Update LED */
   8039 		wm_tbi_serdes_set_linkled(sc);
   8040 	} else if (icr & ICR_RXSEQ) {
   8041 		DPRINTF(WM_DEBUG_LINK,
   8042 		    ("%s: LINK: Receive sequence error\n",
   8043 		    device_xname(sc->sc_dev)));
   8044 	}
   8045 }
   8046 
   8047 /*
   8048  * wm_linkintr_serdes:
   8049  *
    8050  *	Helper; handle link interrupts for SERDES mode.
   8051  */
   8052 static void
   8053 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8054 {
   8055 	struct mii_data *mii = &sc->sc_mii;
   8056 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8057 	uint32_t pcs_adv, pcs_lpab, reg;
   8058 
   8059 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8060 		__func__));
   8061 
   8062 	if (icr & ICR_LSC) {
   8063 		/* Check PCS */
   8064 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8065 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8066 			mii->mii_media_status |= IFM_ACTIVE;
   8067 			sc->sc_tbi_linkup = 1;
   8068 		} else {
   8069 			mii->mii_media_status |= IFM_NONE;
   8070 			sc->sc_tbi_linkup = 0;
   8071 			wm_tbi_serdes_set_linkled(sc);
   8072 			return;
   8073 		}
   8074 		mii->mii_media_active |= IFM_1000_SX;
   8075 		if ((reg & PCS_LSTS_FDX) != 0)
   8076 			mii->mii_media_active |= IFM_FDX;
   8077 		else
   8078 			mii->mii_media_active |= IFM_HDX;
   8079 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8080 			/* Check flow */
   8081 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8082 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8083 				DPRINTF(WM_DEBUG_LINK,
   8084 				    ("XXX LINKOK but not ACOMP\n"));
   8085 				return;
   8086 			}
   8087 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8088 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8089 			DPRINTF(WM_DEBUG_LINK,
   8090 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
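         			/*
         			 * Resolve TX/RX pause per the usual 802.3
         			 * Annex 28B priority rules:
         			 *   both sides SYM                  -> TX+RX pause
         			 *   we ASYM only, partner SYM+ASYM  -> TX pause only
         			 *   we SYM+ASYM, partner ASYM only  -> RX pause only
         			 */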
   8091 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8092 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8093 				mii->mii_media_active |= IFM_FLOW
   8094 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8095 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8096 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8097 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8098 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8099 				mii->mii_media_active |= IFM_FLOW
   8100 				    | IFM_ETH_TXPAUSE;
   8101 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8102 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8103 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8104 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8105 				mii->mii_media_active |= IFM_FLOW
   8106 				    | IFM_ETH_RXPAUSE;
   8107 		}
   8108 		/* Update LED */
   8109 		wm_tbi_serdes_set_linkled(sc);
   8110 	} else {
   8111 		DPRINTF(WM_DEBUG_LINK,
   8112 		    ("%s: LINK: Receive sequence error\n",
   8113 		    device_xname(sc->sc_dev)));
   8114 	}
   8115 }
   8116 
   8117 /*
   8118  * wm_linkintr:
   8119  *
   8120  *	Helper; handle link interrupts.
   8121  */
   8122 static void
   8123 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8124 {
   8125 
   8126 	KASSERT(WM_CORE_LOCKED(sc));
   8127 
   8128 	if (sc->sc_flags & WM_F_HAS_MII)
   8129 		wm_linkintr_gmii(sc, icr);
   8130 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8131 	    && (sc->sc_type >= WM_T_82575))
   8132 		wm_linkintr_serdes(sc, icr);
   8133 	else
   8134 		wm_linkintr_tbi(sc, icr);
   8135 }
   8136 
   8137 /*
   8138  * wm_intr_legacy:
   8139  *
   8140  *	Interrupt service routine for INTx and MSI.
   8141  */
   8142 static int
   8143 wm_intr_legacy(void *arg)
   8144 {
   8145 	struct wm_softc *sc = arg;
   8146 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8147 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   8148 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8149 	uint32_t icr, rndval = 0;
   8150 	int handled = 0;
   8151 
   8152 	DPRINTF(WM_DEBUG_TX,
   8153 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8154 	while (1 /* CONSTCOND */) {
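         		/*
         		 * Reading ICR acknowledges (clears) the currently
         		 * asserted interrupt causes, so each pass of this loop
         		 * services everything captured in one snapshot.
         		 */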
   8155 		icr = CSR_READ(sc, WMREG_ICR);
   8156 		if ((icr & sc->sc_icr) == 0)
   8157 			break;
   8158 		if (rndval == 0)
   8159 			rndval = icr;
   8160 
   8161 		mutex_enter(rxq->rxq_lock);
   8162 
   8163 		if (rxq->rxq_stopping) {
   8164 			mutex_exit(rxq->rxq_lock);
   8165 			break;
   8166 		}
   8167 
   8168 		handled = 1;
   8169 
   8170 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8171 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8172 			DPRINTF(WM_DEBUG_RX,
   8173 			    ("%s: RX: got Rx intr 0x%08x\n",
   8174 			    device_xname(sc->sc_dev),
   8175 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8176 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8177 		}
   8178 #endif
   8179 		wm_rxeof(rxq);
   8180 
   8181 		mutex_exit(rxq->rxq_lock);
   8182 		mutex_enter(txq->txq_lock);
   8183 
   8184 		if (txq->txq_stopping) {
   8185 			mutex_exit(txq->txq_lock);
   8186 			break;
   8187 		}
   8188 
   8189 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8190 		if (icr & ICR_TXDW) {
   8191 			DPRINTF(WM_DEBUG_TX,
   8192 			    ("%s: TX: got TXDW interrupt\n",
   8193 			    device_xname(sc->sc_dev)));
   8194 			WM_Q_EVCNT_INCR(txq, txdw);
   8195 		}
   8196 #endif
   8197 		wm_txeof(sc, txq);
   8198 
   8199 		mutex_exit(txq->txq_lock);
   8200 		WM_CORE_LOCK(sc);
   8201 
   8202 		if (sc->sc_core_stopping) {
   8203 			WM_CORE_UNLOCK(sc);
   8204 			break;
   8205 		}
   8206 
   8207 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8208 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8209 			wm_linkintr(sc, icr);
   8210 		}
   8211 
   8212 		WM_CORE_UNLOCK(sc);
   8213 
   8214 		if (icr & ICR_RXO) {
   8215 #if defined(WM_DEBUG)
   8216 			log(LOG_WARNING, "%s: Receive overrun\n",
   8217 			    device_xname(sc->sc_dev));
   8218 #endif /* defined(WM_DEBUG) */
   8219 		}
   8220 	}
   8221 
   8222 	rnd_add_uint32(&sc->rnd_source, rndval);
   8223 
   8224 	if (handled) {
   8225 		/* Try to get more packets going. */
   8226 		if_schedule_deferred_start(ifp);
   8227 	}
   8228 
   8229 	return handled;
   8230 }
   8231 
   8232 static int
   8233 wm_txrxintr_msix(void *arg)
   8234 {
   8235 	struct wm_queue *wmq = arg;
   8236 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8237 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8238 	struct wm_softc *sc = txq->txq_sc;
   8239 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8240 
   8241 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8242 
   8243 	DPRINTF(WM_DEBUG_TX,
   8244 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8245 
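         	/*
         	 * Mask this queue's interrupt source while it is being
         	 * serviced; the matching IMS/EIMS write at the bottom of
         	 * this handler re-enables it.
         	 */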
   8246 	if (sc->sc_type == WM_T_82574)
   8247 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8248 	else if (sc->sc_type == WM_T_82575)
   8249 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8250 	else
   8251 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8252 
   8253 	mutex_enter(txq->txq_lock);
   8254 
   8255 	if (txq->txq_stopping) {
   8256 		mutex_exit(txq->txq_lock);
   8257 		return 0;
   8258 	}
   8259 
   8260 	WM_Q_EVCNT_INCR(txq, txdw);
   8261 	wm_txeof(sc, txq);
   8262 
   8263 	/* Try to get more packets going. */
   8264 	if (pcq_peek(txq->txq_interq) != NULL)
   8265 		if_schedule_deferred_start(ifp);
    8266 	/*
    8267 	 * Some upper-layer code (e.g. ALTQ) still calls
    8268 	 * ifp->if_start() directly.
    8269 	 */
   8270 	if (wmq->wmq_id == 0)
   8271 		if_schedule_deferred_start(ifp);
   8272 
   8273 	mutex_exit(txq->txq_lock);
   8274 
   8275 	DPRINTF(WM_DEBUG_RX,
   8276 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8277 	mutex_enter(rxq->rxq_lock);
   8278 
   8279 	if (rxq->rxq_stopping) {
   8280 		mutex_exit(rxq->rxq_lock);
   8281 		return 0;
   8282 	}
   8283 
   8284 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8285 	wm_rxeof(rxq);
   8286 	mutex_exit(rxq->rxq_lock);
   8287 
   8288 	if (sc->sc_type == WM_T_82574)
   8289 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8290 	else if (sc->sc_type == WM_T_82575)
   8291 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8292 	else
   8293 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8294 
   8295 	return 1;
   8296 }
   8297 
   8298 /*
   8299  * wm_linkintr_msix:
   8300  *
   8301  *	Interrupt service routine for link status change for MSI-X.
   8302  */
   8303 static int
   8304 wm_linkintr_msix(void *arg)
   8305 {
   8306 	struct wm_softc *sc = arg;
   8307 	uint32_t reg;
   8308 
   8309 	DPRINTF(WM_DEBUG_LINK,
   8310 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8311 
   8312 	reg = CSR_READ(sc, WMREG_ICR);
   8313 	WM_CORE_LOCK(sc);
   8314 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8315 		goto out;
   8316 
   8317 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8318 	wm_linkintr(sc, ICR_LSC);
   8319 
   8320 out:
   8321 	WM_CORE_UNLOCK(sc);
   8322 
   8323 	if (sc->sc_type == WM_T_82574)
   8324 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8325 	else if (sc->sc_type == WM_T_82575)
   8326 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8327 	else
   8328 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8329 
   8330 	return 1;
   8331 }
   8332 
   8333 /*
   8334  * Media related.
   8335  * GMII, SGMII, TBI (and SERDES)
   8336  */
   8337 
   8338 /* Common */
   8339 
   8340 /*
   8341  * wm_tbi_serdes_set_linkled:
   8342  *
   8343  *	Update the link LED on TBI and SERDES devices.
   8344  */
   8345 static void
   8346 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8347 {
   8348 
   8349 	if (sc->sc_tbi_linkup)
   8350 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8351 	else
   8352 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8353 
   8354 	/* 82540 or newer devices are active low */
   8355 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8356 
   8357 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8358 }
   8359 
   8360 /* GMII related */
   8361 
   8362 /*
   8363  * wm_gmii_reset:
   8364  *
   8365  *	Reset the PHY.
   8366  */
   8367 static void
   8368 wm_gmii_reset(struct wm_softc *sc)
   8369 {
   8370 	uint32_t reg;
   8371 	int rv;
   8372 
   8373 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8374 		device_xname(sc->sc_dev), __func__));
   8375 
   8376 	rv = sc->phy.acquire(sc);
   8377 	if (rv != 0) {
   8378 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8379 		    __func__);
   8380 		return;
   8381 	}
   8382 
   8383 	switch (sc->sc_type) {
   8384 	case WM_T_82542_2_0:
   8385 	case WM_T_82542_2_1:
   8386 		/* null */
   8387 		break;
   8388 	case WM_T_82543:
   8389 		/*
   8390 		 * With 82543, we need to force speed and duplex on the MAC
   8391 		 * equal to what the PHY speed and duplex configuration is.
   8392 		 * In addition, we need to perform a hardware reset on the PHY
   8393 		 * to take it out of reset.
   8394 		 */
   8395 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8396 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8397 
   8398 		/* The PHY reset pin is active-low. */
   8399 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8400 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8401 		    CTRL_EXT_SWDPIN(4));
   8402 		reg |= CTRL_EXT_SWDPIO(4);
   8403 
   8404 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8405 		CSR_WRITE_FLUSH(sc);
   8406 		delay(10*1000);
   8407 
   8408 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8409 		CSR_WRITE_FLUSH(sc);
   8410 		delay(150);
   8411 #if 0
   8412 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8413 #endif
   8414 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8415 		break;
   8416 	case WM_T_82544:	/* reset 10000us */
   8417 	case WM_T_82540:
   8418 	case WM_T_82545:
   8419 	case WM_T_82545_3:
   8420 	case WM_T_82546:
   8421 	case WM_T_82546_3:
   8422 	case WM_T_82541:
   8423 	case WM_T_82541_2:
   8424 	case WM_T_82547:
   8425 	case WM_T_82547_2:
   8426 	case WM_T_82571:	/* reset 100us */
   8427 	case WM_T_82572:
   8428 	case WM_T_82573:
   8429 	case WM_T_82574:
   8430 	case WM_T_82575:
   8431 	case WM_T_82576:
   8432 	case WM_T_82580:
   8433 	case WM_T_I350:
   8434 	case WM_T_I354:
   8435 	case WM_T_I210:
   8436 	case WM_T_I211:
   8437 	case WM_T_82583:
   8438 	case WM_T_80003:
   8439 		/* generic reset */
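         		/*
         		 * Assert CTRL_PHY_RESET, give the PHY time to latch it,
         		 * then deassert and wait for the PHY to come back out
         		 * of reset before anything else touches it.
         		 */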
   8440 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8441 		CSR_WRITE_FLUSH(sc);
   8442 		delay(20000);
   8443 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8444 		CSR_WRITE_FLUSH(sc);
   8445 		delay(20000);
   8446 
   8447 		if ((sc->sc_type == WM_T_82541)
   8448 		    || (sc->sc_type == WM_T_82541_2)
   8449 		    || (sc->sc_type == WM_T_82547)
   8450 		    || (sc->sc_type == WM_T_82547_2)) {
    8451 			/* Workarounds for IGP PHYs are done in igp_reset() */
   8452 			/* XXX add code to set LED after phy reset */
   8453 		}
   8454 		break;
   8455 	case WM_T_ICH8:
   8456 	case WM_T_ICH9:
   8457 	case WM_T_ICH10:
   8458 	case WM_T_PCH:
   8459 	case WM_T_PCH2:
   8460 	case WM_T_PCH_LPT:
   8461 	case WM_T_PCH_SPT:
   8462 		/* generic reset */
   8463 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8464 		CSR_WRITE_FLUSH(sc);
   8465 		delay(100);
   8466 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8467 		CSR_WRITE_FLUSH(sc);
   8468 		delay(150);
   8469 		break;
   8470 	default:
   8471 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8472 		    __func__);
   8473 		break;
   8474 	}
   8475 
   8476 	sc->phy.release(sc);
   8477 
   8478 	/* get_cfg_done */
   8479 	wm_get_cfg_done(sc);
   8480 
   8481 	/* extra setup */
   8482 	switch (sc->sc_type) {
   8483 	case WM_T_82542_2_0:
   8484 	case WM_T_82542_2_1:
   8485 	case WM_T_82543:
   8486 	case WM_T_82544:
   8487 	case WM_T_82540:
   8488 	case WM_T_82545:
   8489 	case WM_T_82545_3:
   8490 	case WM_T_82546:
   8491 	case WM_T_82546_3:
   8492 	case WM_T_82541_2:
   8493 	case WM_T_82547_2:
   8494 	case WM_T_82571:
   8495 	case WM_T_82572:
   8496 	case WM_T_82573:
   8497 	case WM_T_82575:
   8498 	case WM_T_82576:
   8499 	case WM_T_82580:
   8500 	case WM_T_I350:
   8501 	case WM_T_I354:
   8502 	case WM_T_I210:
   8503 	case WM_T_I211:
   8504 	case WM_T_80003:
   8505 		/* null */
   8506 		break;
   8507 	case WM_T_82574:
   8508 	case WM_T_82583:
   8509 		wm_lplu_d0_disable(sc);
   8510 		break;
   8511 	case WM_T_82541:
   8512 	case WM_T_82547:
    8513 		/* XXX Configure the activity LED after PHY reset */
   8514 		break;
   8515 	case WM_T_ICH8:
   8516 	case WM_T_ICH9:
   8517 	case WM_T_ICH10:
   8518 	case WM_T_PCH:
   8519 	case WM_T_PCH2:
   8520 	case WM_T_PCH_LPT:
   8521 	case WM_T_PCH_SPT:
    8522 		/* Allow time for h/w to get to a quiescent state after reset */
   8523 		delay(10*1000);
   8524 
   8525 		if (sc->sc_type == WM_T_PCH)
   8526 			wm_hv_phy_workaround_ich8lan(sc);
   8527 
   8528 		if (sc->sc_type == WM_T_PCH2)
   8529 			wm_lv_phy_workaround_ich8lan(sc);
   8530 
   8531 		/* Clear the host wakeup bit after lcd reset */
   8532 		if (sc->sc_type >= WM_T_PCH) {
   8533 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8534 			    BM_PORT_GEN_CFG);
   8535 			reg &= ~BM_WUC_HOST_WU_BIT;
   8536 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8537 			    BM_PORT_GEN_CFG, reg);
   8538 		}
   8539 
    8540 		/*
    8541 		 * XXX Configure the LCD with the extended configuration
    8542 		 * region in NVM.
    8543 		 */
   8544 
   8545 		/* Disable D0 LPLU. */
   8546 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8547 			wm_lplu_d0_disable_pch(sc);
   8548 		else
   8549 			wm_lplu_d0_disable(sc);	/* ICH* */
   8550 		break;
   8551 	default:
   8552 		panic("%s: unknown type\n", __func__);
   8553 		break;
   8554 	}
   8555 }
   8556 
   8557 /*
   8558  * wm_get_phy_id_82575:
   8559  *
    8560  *	Return the PHY ID, or -1 on failure.
   8561  */
   8562 static int
   8563 wm_get_phy_id_82575(struct wm_softc *sc)
   8564 {
   8565 	uint32_t reg;
   8566 	int phyid = -1;
   8567 
   8568 	/* XXX */
   8569 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8570 		return -1;
   8571 
   8572 	if (wm_sgmii_uses_mdio(sc)) {
   8573 		switch (sc->sc_type) {
   8574 		case WM_T_82575:
   8575 		case WM_T_82576:
   8576 			reg = CSR_READ(sc, WMREG_MDIC);
   8577 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8578 			break;
   8579 		case WM_T_82580:
   8580 		case WM_T_I350:
   8581 		case WM_T_I354:
   8582 		case WM_T_I210:
   8583 		case WM_T_I211:
   8584 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8585 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8586 			break;
   8587 		default:
   8588 			return -1;
   8589 		}
   8590 	}
   8591 
   8592 	return phyid;
   8593 }
   8594 
   8595 
   8596 /*
   8597  * wm_gmii_mediainit:
   8598  *
   8599  *	Initialize media for use on 1000BASE-T devices.
   8600  */
   8601 static void
   8602 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8603 {
   8604 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8605 	struct mii_data *mii = &sc->sc_mii;
   8606 	uint32_t reg;
   8607 
   8608 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8609 		device_xname(sc->sc_dev), __func__));
   8610 
   8611 	/* We have GMII. */
   8612 	sc->sc_flags |= WM_F_HAS_MII;
   8613 
   8614 	if (sc->sc_type == WM_T_80003)
   8615 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8616 	else
   8617 		sc->sc_tipg = TIPG_1000T_DFLT;
   8618 
   8619 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8620 	if ((sc->sc_type == WM_T_82580)
   8621 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8622 	    || (sc->sc_type == WM_T_I211)) {
   8623 		reg = CSR_READ(sc, WMREG_PHPM);
   8624 		reg &= ~PHPM_GO_LINK_D;
   8625 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8626 	}
   8627 
   8628 	/*
   8629 	 * Let the chip set speed/duplex on its own based on
   8630 	 * signals from the PHY.
   8631 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8632 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8633 	 */
   8634 	sc->sc_ctrl |= CTRL_SLU;
   8635 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8636 
   8637 	/* Initialize our media structures and probe the GMII. */
   8638 	mii->mii_ifp = ifp;
   8639 
   8640 	/*
   8641 	 * Determine the PHY access method.
   8642 	 *
   8643 	 *  For SGMII, use SGMII specific method.
   8644 	 *
   8645 	 *  For some devices, we can determine the PHY access method
   8646 	 * from sc_type.
   8647 	 *
    8648 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    8649 	 * access method from sc_type alone, so use the PCI product ID for
    8650 	 * some devices.
    8651 	 * For other ICH8 variants, try igp's method first; if the PHY
    8652 	 * can't be detected that way, fall back to bm's method.
   8653 	 */
   8654 	switch (prodid) {
   8655 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8656 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8657 		/* 82577 */
   8658 		sc->sc_phytype = WMPHY_82577;
   8659 		break;
   8660 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8661 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8662 		/* 82578 */
   8663 		sc->sc_phytype = WMPHY_82578;
   8664 		break;
   8665 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8666 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8667 		/* 82579 */
   8668 		sc->sc_phytype = WMPHY_82579;
   8669 		break;
   8670 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8671 	case PCI_PRODUCT_INTEL_82801I_BM:
   8672 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8673 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8674 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8675 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8676 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8677 		/* ICH8, 9, 10 with 82567 */
   8678 		sc->sc_phytype = WMPHY_BM;
   8679 		mii->mii_readreg = wm_gmii_bm_readreg;
   8680 		mii->mii_writereg = wm_gmii_bm_writereg;
   8681 		break;
   8682 	default:
   8683 		if (((sc->sc_flags & WM_F_SGMII) != 0)
    8684 		    && !wm_sgmii_uses_mdio(sc)) {
   8685 			/* SGMII */
   8686 			mii->mii_readreg = wm_sgmii_readreg;
   8687 			mii->mii_writereg = wm_sgmii_writereg;
   8688 		} else if ((sc->sc_type == WM_T_82574)
   8689 		    || (sc->sc_type == WM_T_82583)) {
   8690 			/* BM2 (phyaddr == 1) */
   8691 			sc->sc_phytype = WMPHY_BM;
   8692 			mii->mii_readreg = wm_gmii_bm_readreg;
   8693 			mii->mii_writereg = wm_gmii_bm_writereg;
   8694 		} else if (sc->sc_type >= WM_T_ICH8) {
   8695 			/* non-82567 ICH8, 9 and 10 */
   8696 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8697 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8698 		} else if (sc->sc_type >= WM_T_80003) {
   8699 			/* 80003 */
   8700 			sc->sc_phytype = WMPHY_GG82563;
   8701 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8702 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8703 		} else if (sc->sc_type >= WM_T_I210) {
   8704 			/* I210 and I211 */
   8705 			sc->sc_phytype = WMPHY_210;
   8706 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8707 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8708 		} else if (sc->sc_type >= WM_T_82580) {
   8709 			/* 82580, I350 and I354 */
   8710 			sc->sc_phytype = WMPHY_82580;
   8711 			mii->mii_readreg = wm_gmii_82580_readreg;
   8712 			mii->mii_writereg = wm_gmii_82580_writereg;
   8713 		} else if (sc->sc_type >= WM_T_82544) {
    8714 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8715 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8716 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8717 		} else {
   8718 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8719 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8720 		}
   8721 		break;
   8722 	}
   8723 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8724 		/* All PCH* use _hv_ */
   8725 		mii->mii_readreg = wm_gmii_hv_readreg;
   8726 		mii->mii_writereg = wm_gmii_hv_writereg;
   8727 	}
   8728 	mii->mii_statchg = wm_gmii_statchg;
   8729 
   8730 	/* get PHY control from SMBus to PCIe */
   8731 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   8732 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   8733 		wm_smbustopci(sc);
   8734 
   8735 	wm_gmii_reset(sc);
   8736 
   8737 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8738 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8739 	    wm_gmii_mediastatus);
   8740 
   8741 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8742 	    || (sc->sc_type == WM_T_82580)
   8743 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8744 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8745 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8746 			/* Attach only one port */
   8747 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8748 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8749 		} else {
   8750 			int i, id;
   8751 			uint32_t ctrl_ext;
   8752 
   8753 			id = wm_get_phy_id_82575(sc);
   8754 			if (id != -1) {
   8755 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8756 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8757 			}
   8758 			if ((id == -1)
   8759 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8760 				/* Power on sgmii phy if it is disabled */
   8761 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8762 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    8763 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   8764 				CSR_WRITE_FLUSH(sc);
   8765 				delay(300*1000); /* XXX too long */
   8766 
    8767 				/* Try PHY addresses 1 through 7 */
   8768 				for (i = 1; i < 8; i++)
   8769 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8770 					    0xffffffff, i, MII_OFFSET_ANY,
   8771 					    MIIF_DOPAUSE);
   8772 
    8773 				/* Restore previous SFP cage power state */
   8774 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8775 			}
   8776 		}
   8777 	} else {
   8778 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8779 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8780 	}
   8781 
   8782 	/*
   8783 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8784 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8785 	 */
   8786 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8787 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8788 		wm_set_mdio_slow_mode_hv(sc);
   8789 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8790 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8791 	}
   8792 
   8793 	/*
   8794 	 * (For ICH8 variants)
   8795 	 * If PHY detection failed, use BM's r/w function and retry.
   8796 	 */
   8797 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8798 		/* if failed, retry with *_bm_* */
   8799 		mii->mii_readreg = wm_gmii_bm_readreg;
   8800 		mii->mii_writereg = wm_gmii_bm_writereg;
   8801 
   8802 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8803 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8804 	}
   8805 
   8806 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8807 		/* No PHY was found */
   8808 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8809 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8810 		sc->sc_phytype = WMPHY_NONE;
   8811 	} else {
   8812 		/*
   8813 		 * PHY Found!
   8814 		 * Check PHY type.
   8815 		 */
   8816 		uint32_t model;
   8817 		struct mii_softc *child;
   8818 
   8819 		child = LIST_FIRST(&mii->mii_phys);
   8820 		model = child->mii_mpd_model;
   8821 		if (model == MII_MODEL_yyINTEL_I82566)
   8822 			sc->sc_phytype = WMPHY_IGP_3;
   8823 
   8824 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8825 	}
   8826 }
   8827 
   8828 /*
   8829  * wm_gmii_mediachange:	[ifmedia interface function]
   8830  *
   8831  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8832  */
   8833 static int
   8834 wm_gmii_mediachange(struct ifnet *ifp)
   8835 {
   8836 	struct wm_softc *sc = ifp->if_softc;
   8837 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8838 	int rc;
   8839 
   8840 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8841 		device_xname(sc->sc_dev), __func__));
   8842 	if ((ifp->if_flags & IFF_UP) == 0)
   8843 		return 0;
   8844 
   8845 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8846 	sc->sc_ctrl |= CTRL_SLU;
   8847 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8848 	    || (sc->sc_type > WM_T_82543)) {
   8849 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8850 	} else {
   8851 		sc->sc_ctrl &= ~CTRL_ASDE;
   8852 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8853 		if (ife->ifm_media & IFM_FDX)
   8854 			sc->sc_ctrl |= CTRL_FD;
   8855 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8856 		case IFM_10_T:
   8857 			sc->sc_ctrl |= CTRL_SPEED_10;
   8858 			break;
   8859 		case IFM_100_TX:
   8860 			sc->sc_ctrl |= CTRL_SPEED_100;
   8861 			break;
   8862 		case IFM_1000_T:
   8863 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8864 			break;
   8865 		default:
   8866 			panic("wm_gmii_mediachange: bad media 0x%x",
   8867 			    ife->ifm_media);
   8868 		}
   8869 	}
   8870 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8871 	if (sc->sc_type <= WM_T_82543)
   8872 		wm_gmii_reset(sc);
   8873 
   8874 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8875 		return 0;
   8876 	return rc;
   8877 }
   8878 
   8879 /*
   8880  * wm_gmii_mediastatus:	[ifmedia interface function]
   8881  *
   8882  *	Get the current interface media status on a 1000BASE-T device.
   8883  */
   8884 static void
   8885 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8886 {
   8887 	struct wm_softc *sc = ifp->if_softc;
   8888 
   8889 	ether_mediastatus(ifp, ifmr);
   8890 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8891 	    | sc->sc_flowflags;
   8892 }
   8893 
   8894 #define	MDI_IO		CTRL_SWDPIN(2)
   8895 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8896 #define	MDI_CLK		CTRL_SWDPIN(3)
   8897 
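         /*
          * The i82543 bit-bangs IEEE 802.3 clause 22 MDIO frames through the
          * software-definable pins above: a 32-bit preamble of all ones, a
          * 2-bit start delimiter (01), a 2-bit opcode (10 = read, 01 = write),
          * the 5-bit PHY address and the 5-bit register address, followed by
          * 16 data bits driven out (write) or a turnaround plus 16 data bits
          * clocked back in (read).  Each bit is presented on MDI_IO and
          * strobed with a low-high-low pulse on MDI_CLK, as implemented by
          * the two helpers below.
          */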
   8898 static void
   8899 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8900 {
   8901 	uint32_t i, v;
   8902 
   8903 	v = CSR_READ(sc, WMREG_CTRL);
   8904 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8905 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8906 
   8907 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8908 		if (data & i)
   8909 			v |= MDI_IO;
   8910 		else
   8911 			v &= ~MDI_IO;
   8912 		CSR_WRITE(sc, WMREG_CTRL, v);
   8913 		CSR_WRITE_FLUSH(sc);
   8914 		delay(10);
   8915 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8916 		CSR_WRITE_FLUSH(sc);
   8917 		delay(10);
   8918 		CSR_WRITE(sc, WMREG_CTRL, v);
   8919 		CSR_WRITE_FLUSH(sc);
   8920 		delay(10);
   8921 	}
   8922 }
   8923 
   8924 static uint32_t
   8925 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8926 {
   8927 	uint32_t v, i, data = 0;
   8928 
   8929 	v = CSR_READ(sc, WMREG_CTRL);
   8930 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8931 	v |= CTRL_SWDPIO(3);
   8932 
   8933 	CSR_WRITE(sc, WMREG_CTRL, v);
   8934 	CSR_WRITE_FLUSH(sc);
   8935 	delay(10);
   8936 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8937 	CSR_WRITE_FLUSH(sc);
   8938 	delay(10);
   8939 	CSR_WRITE(sc, WMREG_CTRL, v);
   8940 	CSR_WRITE_FLUSH(sc);
   8941 	delay(10);
   8942 
   8943 	for (i = 0; i < 16; i++) {
   8944 		data <<= 1;
   8945 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8946 		CSR_WRITE_FLUSH(sc);
   8947 		delay(10);
   8948 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8949 			data |= 1;
   8950 		CSR_WRITE(sc, WMREG_CTRL, v);
   8951 		CSR_WRITE_FLUSH(sc);
   8952 		delay(10);
   8953 	}
   8954 
   8955 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8956 	CSR_WRITE_FLUSH(sc);
   8957 	delay(10);
   8958 	CSR_WRITE(sc, WMREG_CTRL, v);
   8959 	CSR_WRITE_FLUSH(sc);
   8960 	delay(10);
   8961 
   8962 	return data;
   8963 }
   8964 
   8965 #undef MDI_IO
   8966 #undef MDI_DIR
   8967 #undef MDI_CLK
   8968 
   8969 /*
   8970  * wm_gmii_i82543_readreg:	[mii interface function]
   8971  *
   8972  *	Read a PHY register on the GMII (i82543 version).
   8973  */
   8974 static int
   8975 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8976 {
   8977 	struct wm_softc *sc = device_private(self);
   8978 	int rv;
   8979 
   8980 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8981 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8982 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8983 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8984 
   8985 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8986 	    device_xname(sc->sc_dev), phy, reg, rv));
   8987 
   8988 	return rv;
   8989 }
   8990 
   8991 /*
   8992  * wm_gmii_i82543_writereg:	[mii interface function]
   8993  *
   8994  *	Write a PHY register on the GMII (i82543 version).
   8995  */
   8996 static void
   8997 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8998 {
   8999 	struct wm_softc *sc = device_private(self);
   9000 
   9001 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9002 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9003 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9004 	    (MII_COMMAND_START << 30), 32);
   9005 }
   9006 
   9007 /*
   9008  * wm_gmii_mdic_readreg:	[mii interface function]
   9009  *
   9010  *	Read a PHY register on the GMII.
   9011  */
   9012 static int
   9013 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9014 {
   9015 	struct wm_softc *sc = device_private(self);
   9016 	uint32_t mdic = 0;
   9017 	int i, rv;
   9018 
   9019 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9020 	    MDIC_REGADD(reg));
   9021 
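         	/*
         	 * The MDIC register serializes the whole MDIO transaction in
         	 * hardware; poll for MDIC_READY, which is set once the 16 data
         	 * bits have been clocked back in, or MDIC_E if the PHY did not
         	 * respond.
         	 */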
   9022 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9023 		mdic = CSR_READ(sc, WMREG_MDIC);
   9024 		if (mdic & MDIC_READY)
   9025 			break;
   9026 		delay(50);
   9027 	}
   9028 
   9029 	if ((mdic & MDIC_READY) == 0) {
   9030 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9031 		    device_xname(sc->sc_dev), phy, reg);
   9032 		rv = 0;
   9033 	} else if (mdic & MDIC_E) {
   9034 #if 0 /* This is normal if no PHY is present. */
   9035 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9036 		    device_xname(sc->sc_dev), phy, reg);
   9037 #endif
   9038 		rv = 0;
   9039 	} else {
   9040 		rv = MDIC_DATA(mdic);
   9041 		if (rv == 0xffff)
   9042 			rv = 0;
   9043 	}
   9044 
   9045 	return rv;
   9046 }
   9047 
   9048 /*
   9049  * wm_gmii_mdic_writereg:	[mii interface function]
   9050  *
   9051  *	Write a PHY register on the GMII.
   9052  */
   9053 static void
   9054 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9055 {
   9056 	struct wm_softc *sc = device_private(self);
   9057 	uint32_t mdic = 0;
   9058 	int i;
   9059 
   9060 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9061 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9062 
   9063 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9064 		mdic = CSR_READ(sc, WMREG_MDIC);
   9065 		if (mdic & MDIC_READY)
   9066 			break;
   9067 		delay(50);
   9068 	}
   9069 
   9070 	if ((mdic & MDIC_READY) == 0)
   9071 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9072 		    device_xname(sc->sc_dev), phy, reg);
   9073 	else if (mdic & MDIC_E)
   9074 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9075 		    device_xname(sc->sc_dev), phy, reg);
   9076 }
   9077 
   9078 /*
   9079  * wm_gmii_i82544_readreg:	[mii interface function]
   9080  *
   9081  *	Read a PHY register on the GMII.
   9082  */
   9083 static int
   9084 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9085 {
   9086 	struct wm_softc *sc = device_private(self);
   9087 	int rv;
   9088 
   9089 	if (sc->phy.acquire(sc)) {
   9090 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9091 		    __func__);
   9092 		return 0;
   9093 	}
   9094 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9095 	sc->phy.release(sc);
   9096 
   9097 	return rv;
   9098 }
   9099 
   9100 /*
   9101  * wm_gmii_i82544_writereg:	[mii interface function]
   9102  *
   9103  *	Write a PHY register on the GMII.
   9104  */
   9105 static void
   9106 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9107 {
   9108 	struct wm_softc *sc = device_private(self);
   9109 
    9110 	if (sc->phy.acquire(sc)) {
    9111 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    9112 		    __func__);
         		return;
    9113 	}
   9114 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9115 	sc->phy.release(sc);
   9116 }
   9117 
   9118 /*
   9119  * wm_gmii_i80003_readreg:	[mii interface function]
   9120  *
    9121  *	Read a PHY register on the Kumeran interface.
    9122  * This could be handled by the PHY layer if we didn't have to lock the
    9123  * resource ...
   9124  */
   9125 static int
   9126 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9127 {
   9128 	struct wm_softc *sc = device_private(self);
   9129 	int rv;
   9130 
   9131 	if (phy != 1) /* only one PHY on kumeran bus */
   9132 		return 0;
   9133 
   9134 	if (sc->phy.acquire(sc)) {
   9135 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9136 		    __func__);
   9137 		return 0;
   9138 	}
   9139 
   9140 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9141 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9142 		    reg >> GG82563_PAGE_SHIFT);
   9143 	} else {
   9144 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9145 		    reg >> GG82563_PAGE_SHIFT);
   9146 	}
    9147 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9148 	delay(200);
   9149 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9150 	delay(200);
   9151 	sc->phy.release(sc);
   9152 
   9153 	return rv;
   9154 }
   9155 
   9156 /*
   9157  * wm_gmii_i80003_writereg:	[mii interface function]
   9158  *
    9159  *	Write a PHY register on the Kumeran interface.
    9160  * This could be handled by the PHY layer if we didn't have to lock the
    9161  * resource ...
   9162  */
   9163 static void
   9164 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9165 {
   9166 	struct wm_softc *sc = device_private(self);
   9167 
   9168 	if (phy != 1) /* only one PHY on kumeran bus */
   9169 		return;
   9170 
   9171 	if (sc->phy.acquire(sc)) {
   9172 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9173 		    __func__);
   9174 		return;
   9175 	}
   9176 
   9177 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9178 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9179 		    reg >> GG82563_PAGE_SHIFT);
   9180 	} else {
   9181 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9182 		    reg >> GG82563_PAGE_SHIFT);
   9183 	}
    9184 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9185 	delay(200);
   9186 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9187 	delay(200);
   9188 
   9189 	sc->phy.release(sc);
   9190 }
   9191 
   9192 /*
   9193  * wm_gmii_bm_readreg:	[mii interface function]
   9194  *
    9195  *	Read a PHY register on the BM PHY.
    9196  * This could be handled by the PHY layer if we didn't have to lock the
    9197  * resource ...
   9198  */
   9199 static int
   9200 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9201 {
   9202 	struct wm_softc *sc = device_private(self);
   9203 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9204 	uint16_t val;
   9205 	int rv;
   9206 
   9207 	if (sc->phy.acquire(sc)) {
   9208 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9209 		    __func__);
   9210 		return 0;
   9211 	}
   9212 
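         	/*
         	 * On parts other than the 82574 and 82583, registers on pages
         	 * >= 768, register 25 of page 0 and register 31 are only
         	 * reachable through PHY address 1, so redirect those accesses.
         	 */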
   9213 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9214 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9215 		    || (reg == 31)) ? 1 : phy;
   9216 	/* Page 800 works differently than the rest so it has its own func */
   9217 	if (page == BM_WUC_PAGE) {
   9218 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9219 		rv = val;
   9220 		goto release;
   9221 	}
   9222 
   9223 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9224 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9225 		    && (sc->sc_type != WM_T_82583))
   9226 			wm_gmii_mdic_writereg(self, phy,
   9227 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9228 		else
   9229 			wm_gmii_mdic_writereg(self, phy,
   9230 			    BME1000_PHY_PAGE_SELECT, page);
   9231 	}
   9232 
   9233 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9234 
   9235 release:
   9236 	sc->phy.release(sc);
   9237 	return rv;
   9238 }
   9239 
   9240 /*
   9241  * wm_gmii_bm_writereg:	[mii interface function]
   9242  *
    9243  *	Write a PHY register on the BM PHY.
    9244  * This could be handled by the PHY layer if we didn't have to lock the
    9245  * resource ...
   9246  */
   9247 static void
   9248 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9249 {
   9250 	struct wm_softc *sc = device_private(self);
   9251 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9252 
   9253 	if (sc->phy.acquire(sc)) {
   9254 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9255 		    __func__);
   9256 		return;
   9257 	}
   9258 
   9259 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9260 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9261 		    || (reg == 31)) ? 1 : phy;
   9262 	/* Page 800 works differently than the rest so it has its own func */
   9263 	if (page == BM_WUC_PAGE) {
   9264 		uint16_t tmp;
   9265 
   9266 		tmp = val;
   9267 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9268 		goto release;
   9269 	}
   9270 
   9271 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9272 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9273 		    && (sc->sc_type != WM_T_82583))
   9274 			wm_gmii_mdic_writereg(self, phy,
   9275 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9276 		else
   9277 			wm_gmii_mdic_writereg(self, phy,
   9278 			    BME1000_PHY_PAGE_SELECT, page);
   9279 	}
   9280 
   9281 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9282 
   9283 release:
   9284 	sc->phy.release(sc);
   9285 }
   9286 
   9287 static void
   9288 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9289 {
   9290 	struct wm_softc *sc = device_private(self);
   9291 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9292 	uint16_t wuce, reg;
   9293 
   9294 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9295 		device_xname(sc->sc_dev), __func__));
   9296 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9297 	if (sc->sc_type == WM_T_PCH) {
    9298 		/* XXX The e1000 driver does nothing here... why? */
   9299 	}
   9300 
   9301 	/*
   9302 	 * 1) Enable PHY wakeup register first.
   9303 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9304 	 */
   9305 
   9306 	/* Set page 769 */
   9307 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9308 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9309 
   9310 	/* Read WUCE and save it */
   9311 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9312 
   9313 	reg = wuce | BM_WUC_ENABLE_BIT;
   9314 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9315 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9316 
   9317 	/* Select page 800 */
   9318 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9319 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9320 
   9321 	/*
   9322 	 * 2) Access PHY wakeup register.
   9323 	 * See e1000_access_phy_wakeup_reg_bm.
   9324 	 */
   9325 
   9326 	/* Write page 800 */
   9327 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9328 
   9329 	if (rd)
   9330 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9331 	else
   9332 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9333 
   9334 	/*
   9335 	 * 3) Disable PHY wakeup register.
   9336 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9337 	 */
   9338 	/* Set page 769 */
   9339 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9340 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9341 
   9342 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9343 }
   9344 
   9345 /*
   9346  * wm_gmii_hv_readreg:	[mii interface function]
   9347  *
    9348  *	Read a PHY register on the HV (PCH and newer) PHY.
    9349  * This could be handled by the PHY layer if we didn't have to lock the
    9350  * resource ...
   9351  */
   9352 static int
   9353 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9354 {
   9355 	struct wm_softc *sc = device_private(self);
   9356 	int rv;
   9357 
   9358 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9359 		device_xname(sc->sc_dev), __func__));
   9360 	if (sc->phy.acquire(sc)) {
   9361 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9362 		    __func__);
   9363 		return 0;
   9364 	}
   9365 
   9366 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9367 	sc->phy.release(sc);
   9368 	return rv;
   9369 }
   9370 
   9371 static int
   9372 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9373 {
   9374 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9375 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9376 	uint16_t val;
   9377 	int rv;
   9378 
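         	/*
         	 * Pages at or above HV_INTC_FC_PAGE_START are only reachable
         	 * through PHY address 1.
         	 */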
   9379 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9380 
   9381 	/* Page 800 works differently than the rest so it has its own func */
   9382 	if (page == BM_WUC_PAGE) {
   9383 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9384 		return val;
   9385 	}
   9386 
   9387 	/*
   9388 	 * Lower than page 768 works differently than the rest so it has its
   9389 	 * own func
   9390 	 */
   9391 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9392 		printf("gmii_hv_readreg!!!\n");
   9393 		return 0;
   9394 	}
   9395 
   9396 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9397 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9398 		    page << BME1000_PAGE_SHIFT);
   9399 	}
   9400 
   9401 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9402 	return rv;
   9403 }
   9404 
   9405 /*
   9406  * wm_gmii_hv_writereg:	[mii interface function]
   9407  *
    9408  *	Write a PHY register on the HV (PCH and newer) PHY.
    9409  * This could be handled by the PHY layer if we didn't have to lock the
    9410  * resource ...
   9411  */
   9412 static void
   9413 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9414 {
   9415 	struct wm_softc *sc = device_private(self);
   9416 
   9417 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9418 		device_xname(sc->sc_dev), __func__));
   9419 
   9420 	if (sc->phy.acquire(sc)) {
   9421 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9422 		    __func__);
   9423 		return;
   9424 	}
   9425 
   9426 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9427 	sc->phy.release(sc);
   9428 }
   9429 
   9430 static void
   9431 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9432 {
   9433 	struct wm_softc *sc = device_private(self);
   9434 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9435 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9436 
   9437 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9438 
   9439 	/* Page 800 works differently than the rest so it has its own func */
   9440 	if (page == BM_WUC_PAGE) {
   9441 		uint16_t tmp;
   9442 
   9443 		tmp = val;
   9444 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9445 		return;
   9446 	}
   9447 
   9448 	/*
   9449 	 * Lower than page 768 works differently than the rest so it has its
   9450 	 * own func
   9451 	 */
   9452 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9453 		printf("gmii_hv_writereg!!!\n");
   9454 		return;
   9455 	}
   9456 
   9457 	{
   9458 		/*
   9459 		 * XXX Workaround MDIO accesses being disabled after entering
   9460 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9461 		 * register is set)
   9462 		 */
   9463 		if (sc->sc_phytype == WMPHY_82578) {
   9464 			struct mii_softc *child;
   9465 
   9466 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9467 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9468 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9469 			    && ((val & (1 << 11)) != 0)) {
   9470 				printf("XXX need workaround\n");
   9471 			}
   9472 		}
   9473 
   9474 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9475 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9476 			    page << BME1000_PAGE_SHIFT);
   9477 		}
   9478 	}
   9479 
   9480 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9481 }
   9482 
   9483 /*
   9484  * wm_gmii_82580_readreg:	[mii interface function]
   9485  *
   9486  *	Read a PHY register on the 82580 and I350.
   9487  * This could be handled by the PHY layer if we didn't have to lock the
    9488  * resource ...
   9489  */
   9490 static int
   9491 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9492 {
   9493 	struct wm_softc *sc = device_private(self);
   9494 	int rv;
   9495 
   9496 	if (sc->phy.acquire(sc) != 0) {
   9497 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9498 		    __func__);
   9499 		return 0;
   9500 	}
   9501 
   9502 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9503 
   9504 	sc->phy.release(sc);
   9505 	return rv;
   9506 }
   9507 
   9508 /*
   9509  * wm_gmii_82580_writereg:	[mii interface function]
   9510  *
   9511  *	Write a PHY register on the 82580 and I350.
   9512  * This could be handled by the PHY layer if we didn't have to lock the
    9513  * resource ...
   9514  */
   9515 static void
   9516 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9517 {
   9518 	struct wm_softc *sc = device_private(self);
   9519 
   9520 	if (sc->phy.acquire(sc) != 0) {
   9521 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9522 		    __func__);
   9523 		return;
   9524 	}
   9525 
   9526 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9527 
   9528 	sc->phy.release(sc);
   9529 }
   9530 
   9531 /*
   9532  * wm_gmii_gs40g_readreg:	[mii interface function]
   9533  *
    9534  *	Read a PHY register on the I210 and I211.
    9535  * This could be handled by the PHY layer if we didn't have to lock the
    9536  * resource ...
   9537  */
   9538 static int
   9539 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9540 {
   9541 	struct wm_softc *sc = device_private(self);
   9542 	int page, offset;
   9543 	int rv;
   9544 
   9545 	/* Acquire semaphore */
   9546 	if (sc->phy.acquire(sc)) {
   9547 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9548 		    __func__);
   9549 		return 0;
   9550 	}
   9551 
   9552 	/* Page select */
   9553 	page = reg >> GS40G_PAGE_SHIFT;
   9554 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9555 
   9556 	/* Read reg */
   9557 	offset = reg & GS40G_OFFSET_MASK;
   9558 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9559 
   9560 	sc->phy.release(sc);
   9561 	return rv;
   9562 }
   9563 
   9564 /*
   9565  * wm_gmii_gs40g_writereg:	[mii interface function]
   9566  *
   9567  *	Write a PHY register on the I210 and I211.
   9568  * This could be handled by the PHY layer if we didn't have to lock the
    9569  * resource ...
   9570  */
   9571 static void
   9572 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9573 {
   9574 	struct wm_softc *sc = device_private(self);
   9575 	int page, offset;
   9576 
   9577 	/* Acquire semaphore */
   9578 	if (sc->phy.acquire(sc)) {
   9579 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9580 		    __func__);
   9581 		return;
   9582 	}
   9583 
   9584 	/* Page select */
   9585 	page = reg >> GS40G_PAGE_SHIFT;
   9586 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9587 
   9588 	/* Write reg */
   9589 	offset = reg & GS40G_OFFSET_MASK;
   9590 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9591 
   9592 	/* Release semaphore */
   9593 	sc->phy.release(sc);
   9594 }
   9595 
   9596 /*
   9597  * wm_gmii_statchg:	[mii interface function]
   9598  *
   9599  *	Callback from MII layer when media changes.
   9600  */
   9601 static void
   9602 wm_gmii_statchg(struct ifnet *ifp)
   9603 {
   9604 	struct wm_softc *sc = ifp->if_softc;
   9605 	struct mii_data *mii = &sc->sc_mii;
   9606 
   9607 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9608 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9609 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9610 
   9611 	/*
   9612 	 * Get flow control negotiation result.
   9613 	 */
   9614 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9615 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9616 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9617 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9618 	}
   9619 
   9620 	if (sc->sc_flowflags & IFM_FLOW) {
   9621 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9622 			sc->sc_ctrl |= CTRL_TFCE;
   9623 			sc->sc_fcrtl |= FCRTL_XONE;
   9624 		}
   9625 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9626 			sc->sc_ctrl |= CTRL_RFCE;
   9627 	}
   9628 
   9629 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9630 		DPRINTF(WM_DEBUG_LINK,
   9631 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9632 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9633 	} else {
   9634 		DPRINTF(WM_DEBUG_LINK,
   9635 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9636 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9637 	}
   9638 
   9639 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9640 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9641 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9642 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9643 	if (sc->sc_type == WM_T_80003) {
   9644 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9645 		case IFM_1000_T:
   9646 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9647 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9648 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9649 			break;
   9650 		default:
   9651 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9652 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    9653 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   9654 			break;
   9655 		}
   9656 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9657 	}
   9658 }
   9659 
   9660 /* kumeran related (80003, ICH* and PCH*) */
   9661 
   9662 /*
   9663  * wm_kmrn_readreg:
   9664  *
   9665  *	Read a kumeran register
   9666  */
   9667 static int
   9668 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9669 {
   9670 	int rv;
   9671 
   9672 	if (sc->sc_type == WM_T_80003)
   9673 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9674 	else
   9675 		rv = sc->phy.acquire(sc);
   9676 	if (rv != 0) {
   9677 		aprint_error_dev(sc->sc_dev,
   9678 		    "%s: failed to get semaphore\n", __func__);
   9679 		return 0;
   9680 	}
   9681 
   9682 	rv = wm_kmrn_readreg_locked(sc, reg);
   9683 
   9684 	if (sc->sc_type == WM_T_80003)
   9685 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9686 	else
   9687 		sc->phy.release(sc);
   9688 
   9689 	return rv;
   9690 }
   9691 
   9692 static int
   9693 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9694 {
   9695 	int rv;
   9696 
   9697 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9698 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9699 	    KUMCTRLSTA_REN);
   9700 	CSR_WRITE_FLUSH(sc);
   9701 	delay(2);
   9702 
   9703 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9704 
   9705 	return rv;
   9706 }
   9707 
   9708 /*
   9709  * wm_kmrn_writereg:
   9710  *
   9711  *	Write a kumeran register
   9712  */
   9713 static void
   9714 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9715 {
   9716 	int rv;
   9717 
   9718 	if (sc->sc_type == WM_T_80003)
   9719 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9720 	else
   9721 		rv = sc->phy.acquire(sc);
   9722 	if (rv != 0) {
   9723 		aprint_error_dev(sc->sc_dev,
   9724 		    "%s: failed to get semaphore\n", __func__);
   9725 		return;
   9726 	}
   9727 
   9728 	wm_kmrn_writereg_locked(sc, reg, val);
   9729 
   9730 	if (sc->sc_type == WM_T_80003)
   9731 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9732 	else
   9733 		sc->phy.release(sc);
   9734 }
   9735 
   9736 static void
   9737 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9738 {
   9739 
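         	/*
         	 * KUMCTRLSTA carries both the register offset and the 16-bit
         	 * data; leaving KUMCTRLSTA_REN clear (the read path above
         	 * sets it) makes this a write cycle.
         	 */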
   9740 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9741 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9742 	    (val & KUMCTRLSTA_MASK));
   9743 }
   9744 
   9745 /* SGMII related */
   9746 
   9747 /*
   9748  * wm_sgmii_uses_mdio
   9749  *
   9750  * Check whether the transaction is to the internal PHY or the external
   9751  * MDIO interface. Return true if it's MDIO.
   9752  */
   9753 static bool
   9754 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9755 {
   9756 	uint32_t reg;
   9757 	bool ismdio = false;
   9758 
   9759 	switch (sc->sc_type) {
   9760 	case WM_T_82575:
   9761 	case WM_T_82576:
   9762 		reg = CSR_READ(sc, WMREG_MDIC);
   9763 		ismdio = ((reg & MDIC_DEST) != 0);
   9764 		break;
   9765 	case WM_T_82580:
   9766 	case WM_T_I350:
   9767 	case WM_T_I354:
   9768 	case WM_T_I210:
   9769 	case WM_T_I211:
   9770 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9771 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9772 		break;
   9773 	default:
   9774 		break;
   9775 	}
   9776 
   9777 	return ismdio;
   9778 }
   9779 
   9780 /*
   9781  * wm_sgmii_readreg:	[mii interface function]
   9782  *
   9783  *	Read a PHY register on the SGMII
   9784  * This could be handled by the PHY layer if we didn't have to lock the
    9785  * resource ...
   9786  */
   9787 static int
   9788 wm_sgmii_readreg(device_t self, int phy, int reg)
   9789 {
   9790 	struct wm_softc *sc = device_private(self);
   9791 	uint32_t i2ccmd;
   9792 	int i, rv;
   9793 
   9794 	if (sc->phy.acquire(sc)) {
   9795 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9796 		    __func__);
   9797 		return 0;
   9798 	}
   9799 
   9800 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9801 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9802 	    | I2CCMD_OPCODE_READ;
   9803 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9804 
   9805 	/* Poll the ready bit */
   9806 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9807 		delay(50);
   9808 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9809 		if (i2ccmd & I2CCMD_READY)
   9810 			break;
   9811 	}
   9812 	if ((i2ccmd & I2CCMD_READY) == 0)
   9813 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9814 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9815 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9816 
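         	/* The I2C interface returns the two data bytes swapped, so
         	 * swap them back before handing the value to the caller. */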
   9817 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9818 
   9819 	sc->phy.release(sc);
   9820 	return rv;
   9821 }
   9822 
   9823 /*
   9824  * wm_sgmii_writereg:	[mii interface function]
   9825  *
   9826  *	Write a PHY register on the SGMII.
   9827  * This could be handled by the PHY layer if we didn't have to lock the
    9828  * resource ...
   9829  */
   9830 static void
   9831 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9832 {
   9833 	struct wm_softc *sc = device_private(self);
   9834 	uint32_t i2ccmd;
   9835 	int i;
   9836 	int val_swapped;
   9837 
   9838 	if (sc->phy.acquire(sc) != 0) {
   9839 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9840 		    __func__);
   9841 		return;
   9842 	}
   9843 	/* Swap the data bytes for the I2C interface */
   9844 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9845 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9846 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9847 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9848 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9849 
   9850 	/* Poll the ready bit */
   9851 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9852 		delay(50);
   9853 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9854 		if (i2ccmd & I2CCMD_READY)
   9855 			break;
   9856 	}
   9857 	if ((i2ccmd & I2CCMD_READY) == 0)
   9858 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9859 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9860 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9861 
   9862 	sc->phy.release(sc);
   9863 }
   9864 
   9865 /* TBI related */
   9866 
   9867 /*
   9868  * wm_tbi_mediainit:
   9869  *
   9870  *	Initialize media for use on 1000BASE-X devices.
   9871  */
   9872 static void
   9873 wm_tbi_mediainit(struct wm_softc *sc)
   9874 {
   9875 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9876 	const char *sep = "";
   9877 
   9878 	if (sc->sc_type < WM_T_82543)
   9879 		sc->sc_tipg = TIPG_WM_DFLT;
   9880 	else
   9881 		sc->sc_tipg = TIPG_LG_DFLT;
   9882 
   9883 	sc->sc_tbi_serdes_anegticks = 5;
   9884 
   9885 	/* Initialize our media structures */
   9886 	sc->sc_mii.mii_ifp = ifp;
   9887 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9888 
   9889 	if ((sc->sc_type >= WM_T_82575)
   9890 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9891 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9892 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9893 	else
   9894 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9895 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9896 
   9897 	/*
   9898 	 * SWD Pins:
   9899 	 *
   9900 	 *	0 = Link LED (output)
   9901 	 *	1 = Loss Of Signal (input)
   9902 	 */
   9903 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9904 
   9905 	/* XXX Perhaps this is only for TBI */
   9906 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9907 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9908 
   9909 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9910 		sc->sc_ctrl &= ~CTRL_LRST;
   9911 
   9912 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9913 
   9914 #define	ADD(ss, mm, dd)							\
   9915 do {									\
   9916 	aprint_normal("%s%s", sep, ss);					\
   9917 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9918 	sep = ", ";							\
   9919 } while (/*CONSTCOND*/0)
   9920 
   9921 	aprint_normal_dev(sc->sc_dev, "");
   9922 
   9923 	if (sc->sc_type == WM_T_I354) {
   9924 		uint32_t status;
   9925 
   9926 		status = CSR_READ(sc, WMREG_STATUS);
   9927 		if (((status & STATUS_2P5_SKU) != 0)
   9928 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   9929 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
   9930 		} else
   9931 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
   9932 	} else if (sc->sc_type == WM_T_82545) {
   9933 		/* Only 82545 is LX (XXX except SFP) */
   9934 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9935 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9936 	} else {
   9937 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9938 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9939 	}
   9940 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9941 	aprint_normal("\n");
   9942 
   9943 #undef ADD
   9944 
   9945 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9946 }
   9947 
   9948 /*
   9949  * wm_tbi_mediachange:	[ifmedia interface function]
   9950  *
   9951  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9952  */
   9953 static int
   9954 wm_tbi_mediachange(struct ifnet *ifp)
   9955 {
   9956 	struct wm_softc *sc = ifp->if_softc;
   9957 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9958 	uint32_t status;
   9959 	int i;
   9960 
   9961 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9962 		/* XXX need some work for >= 82571 and < 82575 */
   9963 		if (sc->sc_type < WM_T_82575)
   9964 			return 0;
   9965 	}
   9966 
   9967 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9968 	    || (sc->sc_type >= WM_T_82575))
   9969 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9970 
   9971 	sc->sc_ctrl &= ~CTRL_LRST;
   9972 	sc->sc_txcw = TXCW_ANE;
   9973 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9974 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9975 	else if (ife->ifm_media & IFM_FDX)
   9976 		sc->sc_txcw |= TXCW_FD;
   9977 	else
   9978 		sc->sc_txcw |= TXCW_HD;
   9979 
   9980 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9981 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9982 
   9983 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9984 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9985 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9986 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9987 	CSR_WRITE_FLUSH(sc);
   9988 	delay(1000);
   9989 
   9990 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9991 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9992 
   9993 	/*
   9994 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   9995 	 * optics detect a signal, 0 if they don't.
   9996 	 */
   9997 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9998 		/* Have signal; wait for the link to come up. */
   9999 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10000 			delay(10000);
   10001 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10002 				break;
   10003 		}
   10004 
   10005 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10006 			    device_xname(sc->sc_dev),i));
   10007 
   10008 		status = CSR_READ(sc, WMREG_STATUS);
   10009 		DPRINTF(WM_DEBUG_LINK,
   10010 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10011 			device_xname(sc->sc_dev),status, STATUS_LU));
   10012 		if (status & STATUS_LU) {
   10013 			/* Link is up. */
   10014 			DPRINTF(WM_DEBUG_LINK,
   10015 			    ("%s: LINK: set media -> link up %s\n",
   10016 			    device_xname(sc->sc_dev),
   10017 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10018 
   10019 			/*
    10020 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    10021 			 * automatically, so re-read sc->sc_ctrl from the register.
   10022 			 */
   10023 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10024 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10025 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10026 			if (status & STATUS_FD)
   10027 				sc->sc_tctl |=
   10028 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10029 			else
   10030 				sc->sc_tctl |=
   10031 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10032 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10033 				sc->sc_fcrtl |= FCRTL_XONE;
   10034 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10035 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10036 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10037 				      sc->sc_fcrtl);
   10038 			sc->sc_tbi_linkup = 1;
   10039 		} else {
   10040 			if (i == WM_LINKUP_TIMEOUT)
   10041 				wm_check_for_link(sc);
   10042 			/* Link is down. */
   10043 			DPRINTF(WM_DEBUG_LINK,
   10044 			    ("%s: LINK: set media -> link down\n",
   10045 			    device_xname(sc->sc_dev)));
   10046 			sc->sc_tbi_linkup = 0;
   10047 		}
   10048 	} else {
   10049 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10050 		    device_xname(sc->sc_dev)));
   10051 		sc->sc_tbi_linkup = 0;
   10052 	}
   10053 
   10054 	wm_tbi_serdes_set_linkled(sc);
   10055 
   10056 	return 0;
   10057 }
   10058 
   10059 /*
   10060  * wm_tbi_mediastatus:	[ifmedia interface function]
   10061  *
   10062  *	Get the current interface media status on a 1000BASE-X device.
   10063  */
   10064 static void
   10065 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10066 {
   10067 	struct wm_softc *sc = ifp->if_softc;
   10068 	uint32_t ctrl, status;
   10069 
   10070 	ifmr->ifm_status = IFM_AVALID;
   10071 	ifmr->ifm_active = IFM_ETHER;
   10072 
   10073 	status = CSR_READ(sc, WMREG_STATUS);
   10074 	if ((status & STATUS_LU) == 0) {
   10075 		ifmr->ifm_active |= IFM_NONE;
   10076 		return;
   10077 	}
   10078 
   10079 	ifmr->ifm_status |= IFM_ACTIVE;
   10080 	/* Only 82545 is LX */
   10081 	if (sc->sc_type == WM_T_82545)
   10082 		ifmr->ifm_active |= IFM_1000_LX;
   10083 	else
   10084 		ifmr->ifm_active |= IFM_1000_SX;
   10085 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10086 		ifmr->ifm_active |= IFM_FDX;
   10087 	else
   10088 		ifmr->ifm_active |= IFM_HDX;
   10089 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10090 	if (ctrl & CTRL_RFCE)
   10091 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10092 	if (ctrl & CTRL_TFCE)
   10093 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10094 }
   10095 
   10096 /* XXX TBI only */
   10097 static int
   10098 wm_check_for_link(struct wm_softc *sc)
   10099 {
   10100 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10101 	uint32_t rxcw;
   10102 	uint32_t ctrl;
   10103 	uint32_t status;
   10104 	uint32_t sig;
   10105 
   10106 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10107 		/* XXX need some work for >= 82571 */
   10108 		if (sc->sc_type >= WM_T_82571) {
   10109 			sc->sc_tbi_linkup = 1;
   10110 			return 0;
   10111 		}
   10112 	}
   10113 
   10114 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10115 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10116 	status = CSR_READ(sc, WMREG_STATUS);
   10117 
   10118 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10119 
   10120 	DPRINTF(WM_DEBUG_LINK,
   10121 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10122 		device_xname(sc->sc_dev), __func__,
   10123 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10124 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10125 
   10126 	/*
   10127 	 * SWDPIN   LU RXCW
   10128 	 *      0    0    0
   10129 	 *      0    0    1	(should not happen)
   10130 	 *      0    1    0	(should not happen)
   10131 	 *      0    1    1	(should not happen)
    10132 	 *      1    0    0	Disable autonegotiation and force link up
    10133 	 *      1    0    1	got /C/ but no link yet
    10134 	 *      1    1    0	(link up)
    10135 	 *      1    1    1	If IFM_AUTO, restart autonegotiation
   10136 	 *
   10137 	 */
   10138 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10139 	    && ((status & STATUS_LU) == 0)
   10140 	    && ((rxcw & RXCW_C) == 0)) {
   10141 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10142 			__func__));
   10143 		sc->sc_tbi_linkup = 0;
   10144 		/* Disable auto-negotiation in the TXCW register */
   10145 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10146 
   10147 		/*
   10148 		 * Force link-up and also force full-duplex.
   10149 		 *
    10150 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
    10151 		 * automatically, so base sc->sc_ctrl on the value read above.
   10152 		 */
   10153 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10154 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10155 	} else if (((status & STATUS_LU) != 0)
   10156 	    && ((rxcw & RXCW_C) != 0)
   10157 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10158 		sc->sc_tbi_linkup = 1;
   10159 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10160 			__func__));
   10161 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10162 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10163 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10164 	    && ((rxcw & RXCW_C) != 0)) {
   10165 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10166 	} else {
   10167 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10168 			status));
   10169 	}
   10170 
   10171 	return 0;
   10172 }
   10173 
   10174 /*
   10175  * wm_tbi_tick:
   10176  *
   10177  *	Check the link on TBI devices.
   10178  *	This function acts as mii_tick().
   10179  */
   10180 static void
   10181 wm_tbi_tick(struct wm_softc *sc)
   10182 {
   10183 	struct mii_data *mii = &sc->sc_mii;
   10184 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10185 	uint32_t status;
   10186 
   10187 	KASSERT(WM_CORE_LOCKED(sc));
   10188 
   10189 	status = CSR_READ(sc, WMREG_STATUS);
   10190 
   10191 	/* XXX is this needed? */
   10192 	(void)CSR_READ(sc, WMREG_RXCW);
   10193 	(void)CSR_READ(sc, WMREG_CTRL);
   10194 
   10195 	/* set link status */
   10196 	if ((status & STATUS_LU) == 0) {
   10197 		DPRINTF(WM_DEBUG_LINK,
   10198 		    ("%s: LINK: checklink -> down\n",
   10199 			device_xname(sc->sc_dev)));
   10200 		sc->sc_tbi_linkup = 0;
   10201 	} else if (sc->sc_tbi_linkup == 0) {
   10202 		DPRINTF(WM_DEBUG_LINK,
   10203 		    ("%s: LINK: checklink -> up %s\n",
   10204 			device_xname(sc->sc_dev),
   10205 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10206 		sc->sc_tbi_linkup = 1;
   10207 		sc->sc_tbi_serdes_ticks = 0;
   10208 	}
   10209 
   10210 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10211 		goto setled;
   10212 
   10213 	if ((status & STATUS_LU) == 0) {
   10214 		sc->sc_tbi_linkup = 0;
   10215 		/* If the timer expired, retry autonegotiation */
   10216 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10217 		    && (++sc->sc_tbi_serdes_ticks
   10218 			>= sc->sc_tbi_serdes_anegticks)) {
   10219 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10220 			sc->sc_tbi_serdes_ticks = 0;
   10221 			/*
   10222 			 * Reset the link, and let autonegotiation do
   10223 			 * its thing
   10224 			 */
   10225 			sc->sc_ctrl |= CTRL_LRST;
   10226 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10227 			CSR_WRITE_FLUSH(sc);
   10228 			delay(1000);
   10229 			sc->sc_ctrl &= ~CTRL_LRST;
   10230 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10231 			CSR_WRITE_FLUSH(sc);
   10232 			delay(1000);
   10233 			CSR_WRITE(sc, WMREG_TXCW,
   10234 			    sc->sc_txcw & ~TXCW_ANE);
   10235 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10236 		}
   10237 	}
   10238 
   10239 setled:
   10240 	wm_tbi_serdes_set_linkled(sc);
   10241 }
   10242 
   10243 /* SERDES related */
   10244 static void
   10245 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10246 {
   10247 	uint32_t reg;
   10248 
   10249 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10250 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10251 		return;
   10252 
   10253 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10254 	reg |= PCS_CFG_PCS_EN;
   10255 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10256 
   10257 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10258 	reg &= ~CTRL_EXT_SWDPIN(3);
   10259 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10260 	CSR_WRITE_FLUSH(sc);
   10261 }
   10262 
   10263 static int
   10264 wm_serdes_mediachange(struct ifnet *ifp)
   10265 {
   10266 	struct wm_softc *sc = ifp->if_softc;
   10267 	bool pcs_autoneg = true; /* XXX */
   10268 	uint32_t ctrl_ext, pcs_lctl, reg;
   10269 
   10270 	/* XXX Currently, this function is not called on 8257[12] */
   10271 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10272 	    || (sc->sc_type >= WM_T_82575))
   10273 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10274 
   10275 	wm_serdes_power_up_link_82575(sc);
   10276 
   10277 	sc->sc_ctrl |= CTRL_SLU;
   10278 
   10279 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10280 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10281 
   10282 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10283 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10284 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10285 	case CTRL_EXT_LINK_MODE_SGMII:
   10286 		pcs_autoneg = true;
   10287 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10288 		break;
   10289 	case CTRL_EXT_LINK_MODE_1000KX:
   10290 		pcs_autoneg = false;
   10291 		/* FALLTHROUGH */
   10292 	default:
   10293 		if ((sc->sc_type == WM_T_82575)
   10294 		    || (sc->sc_type == WM_T_82576)) {
   10295 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10296 				pcs_autoneg = false;
   10297 		}
   10298 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10299 		    | CTRL_FRCFDX;
   10300 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10301 	}
   10302 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10303 
   10304 	if (pcs_autoneg) {
   10305 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10306 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10307 
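          		/* Advertise both symmetric and asymmetric pause. */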
   10308 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10309 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10310 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10311 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10312 	} else
   10313 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10314 
   10315 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10316 
   10317 
   10318 	return 0;
   10319 }
   10320 
   10321 static void
   10322 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10323 {
   10324 	struct wm_softc *sc = ifp->if_softc;
   10325 	struct mii_data *mii = &sc->sc_mii;
   10326 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10327 	uint32_t pcs_adv, pcs_lpab, reg;
   10328 
   10329 	ifmr->ifm_status = IFM_AVALID;
   10330 	ifmr->ifm_active = IFM_ETHER;
   10331 
   10332 	/* Check PCS */
   10333 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10334 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10335 		ifmr->ifm_active |= IFM_NONE;
   10336 		sc->sc_tbi_linkup = 0;
   10337 		goto setled;
   10338 	}
   10339 
   10340 	sc->sc_tbi_linkup = 1;
   10341 	ifmr->ifm_status |= IFM_ACTIVE;
   10342 	if (sc->sc_type == WM_T_I354) {
   10343 		uint32_t status;
   10344 
   10345 		status = CSR_READ(sc, WMREG_STATUS);
   10346 		if (((status & STATUS_2P5_SKU) != 0)
   10347 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10348 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10349 		} else
   10350 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10351 	} else {
   10352 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10353 		case PCS_LSTS_SPEED_10:
   10354 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10355 			break;
   10356 		case PCS_LSTS_SPEED_100:
   10357 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10358 			break;
   10359 		case PCS_LSTS_SPEED_1000:
   10360 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10361 			break;
   10362 		default:
   10363 			device_printf(sc->sc_dev, "Unknown speed\n");
   10364 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10365 			break;
   10366 		}
   10367 	}
   10368 	if ((reg & PCS_LSTS_FDX) != 0)
   10369 		ifmr->ifm_active |= IFM_FDX;
   10370 	else
   10371 		ifmr->ifm_active |= IFM_HDX;
   10372 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10373 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10374 		/* Check flow */
   10375 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10376 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10377 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10378 			goto setled;
   10379 		}
   10380 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10381 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10382 		DPRINTF(WM_DEBUG_LINK,
   10383 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10384 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10385 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10386 			mii->mii_media_active |= IFM_FLOW
   10387 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10388 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10389 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10390 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10391 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10392 			mii->mii_media_active |= IFM_FLOW
   10393 			    | IFM_ETH_TXPAUSE;
   10394 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10395 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10396 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10397 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10398 			mii->mii_media_active |= IFM_FLOW
   10399 			    | IFM_ETH_RXPAUSE;
   10400 		}
   10401 	}
   10402 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10403 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10404 setled:
   10405 	wm_tbi_serdes_set_linkled(sc);
   10406 }
   10407 
   10408 /*
   10409  * wm_serdes_tick:
   10410  *
   10411  *	Check the link on serdes devices.
   10412  */
   10413 static void
   10414 wm_serdes_tick(struct wm_softc *sc)
   10415 {
   10416 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10417 	struct mii_data *mii = &sc->sc_mii;
   10418 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10419 	uint32_t reg;
   10420 
   10421 	KASSERT(WM_CORE_LOCKED(sc));
   10422 
   10423 	mii->mii_media_status = IFM_AVALID;
   10424 	mii->mii_media_active = IFM_ETHER;
   10425 
   10426 	/* Check PCS */
   10427 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10428 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10429 		mii->mii_media_status |= IFM_ACTIVE;
   10430 		sc->sc_tbi_linkup = 1;
   10431 		sc->sc_tbi_serdes_ticks = 0;
   10432 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10433 		if ((reg & PCS_LSTS_FDX) != 0)
   10434 			mii->mii_media_active |= IFM_FDX;
   10435 		else
   10436 			mii->mii_media_active |= IFM_HDX;
   10437 	} else {
   10438 		mii->mii_media_status |= IFM_NONE;
   10439 		sc->sc_tbi_linkup = 0;
   10440 		/* If the timer expired, retry autonegotiation */
   10441 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10442 		    && (++sc->sc_tbi_serdes_ticks
   10443 			>= sc->sc_tbi_serdes_anegticks)) {
   10444 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10445 			sc->sc_tbi_serdes_ticks = 0;
   10446 			/* XXX */
   10447 			wm_serdes_mediachange(ifp);
   10448 		}
   10449 	}
   10450 
   10451 	wm_tbi_serdes_set_linkled(sc);
   10452 }
   10453 
   10454 /* SFP related */
   10455 
   10456 static int
   10457 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10458 {
   10459 	uint32_t i2ccmd;
   10460 	int i;
   10461 
   10462 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10463 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10464 
   10465 	/* Poll the ready bit */
   10466 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10467 		delay(50);
   10468 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10469 		if (i2ccmd & I2CCMD_READY)
   10470 			break;
   10471 	}
   10472 	if ((i2ccmd & I2CCMD_READY) == 0)
   10473 		return -1;
   10474 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10475 		return -1;
   10476 
   10477 	*data = i2ccmd & 0x00ff;
   10478 
   10479 	return 0;
   10480 }
   10481 
   10482 static uint32_t
   10483 wm_sfp_get_media_type(struct wm_softc *sc)
   10484 {
   10485 	uint32_t ctrl_ext;
   10486 	uint8_t val = 0;
   10487 	int timeout = 3;
   10488 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10489 	int rv = -1;
   10490 
   10491 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10492 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10493 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10494 	CSR_WRITE_FLUSH(sc);
   10495 
   10496 	/* Read SFP module data */
   10497 	while (timeout) {
   10498 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10499 		if (rv == 0)
   10500 			break;
   10501 		delay(100*1000); /* XXX too big */
   10502 		timeout--;
   10503 	}
   10504 	if (rv != 0)
   10505 		goto out;
   10506 	switch (val) {
   10507 	case SFF_SFP_ID_SFF:
   10508 		aprint_normal_dev(sc->sc_dev,
   10509 		    "Module/Connector soldered to board\n");
   10510 		break;
   10511 	case SFF_SFP_ID_SFP:
   10512 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10513 		break;
   10514 	case SFF_SFP_ID_UNKNOWN:
   10515 		goto out;
   10516 	default:
   10517 		break;
   10518 	}
   10519 
   10520 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10521 	if (rv != 0) {
   10522 		goto out;
   10523 	}
   10524 
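          	/* Map the SFF Ethernet compliance flags to a driver media type */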
   10525 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10526 		mediatype = WM_MEDIATYPE_SERDES;
    10527 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
    10528 		sc->sc_flags |= WM_F_SGMII;
    10529 		mediatype = WM_MEDIATYPE_COPPER;
    10530 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   10531 		sc->sc_flags |= WM_F_SGMII;
   10532 		mediatype = WM_MEDIATYPE_SERDES;
   10533 	}
   10534 
   10535 out:
   10536 	/* Restore I2C interface setting */
   10537 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10538 
   10539 	return mediatype;
   10540 }
   10541 
   10542 /*
   10543  * NVM related.
   10544  * Microwire, SPI (w/wo EERD) and Flash.
   10545  */
   10546 
   10547 /* Both spi and uwire */
   10548 
   10549 /*
   10550  * wm_eeprom_sendbits:
   10551  *
   10552  *	Send a series of bits to the EEPROM.
   10553  */
   10554 static void
   10555 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10556 {
   10557 	uint32_t reg;
   10558 	int x;
   10559 
   10560 	reg = CSR_READ(sc, WMREG_EECD);
   10561 
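          	/*
          	 * Bit-bang the value out MSB first: present each bit on DI,
          	 * then pulse SK high and low, delaying ~2us around each edge.
          	 */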
   10562 	for (x = nbits; x > 0; x--) {
   10563 		if (bits & (1U << (x - 1)))
   10564 			reg |= EECD_DI;
   10565 		else
   10566 			reg &= ~EECD_DI;
   10567 		CSR_WRITE(sc, WMREG_EECD, reg);
   10568 		CSR_WRITE_FLUSH(sc);
   10569 		delay(2);
   10570 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10571 		CSR_WRITE_FLUSH(sc);
   10572 		delay(2);
   10573 		CSR_WRITE(sc, WMREG_EECD, reg);
   10574 		CSR_WRITE_FLUSH(sc);
   10575 		delay(2);
   10576 	}
   10577 }
   10578 
   10579 /*
   10580  * wm_eeprom_recvbits:
   10581  *
   10582  *	Receive a series of bits from the EEPROM.
   10583  */
   10584 static void
   10585 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10586 {
   10587 	uint32_t reg, val;
   10588 	int x;
   10589 
   10590 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10591 
   10592 	val = 0;
   10593 	for (x = nbits; x > 0; x--) {
   10594 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10595 		CSR_WRITE_FLUSH(sc);
   10596 		delay(2);
   10597 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10598 			val |= (1U << (x - 1));
   10599 		CSR_WRITE(sc, WMREG_EECD, reg);
   10600 		CSR_WRITE_FLUSH(sc);
   10601 		delay(2);
   10602 	}
   10603 	*valp = val;
   10604 }
   10605 
   10606 /* Microwire */
   10607 
   10608 /*
   10609  * wm_nvm_read_uwire:
   10610  *
    10611  *	Read word(s) from the EEPROM using the MicroWire protocol.
   10612  */
   10613 static int
   10614 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10615 {
   10616 	uint32_t reg, val;
   10617 	int i;
   10618 
   10619 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10620 		device_xname(sc->sc_dev), __func__));
   10621 
   10622 	for (i = 0; i < wordcnt; i++) {
   10623 		/* Clear SK and DI. */
   10624 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10625 		CSR_WRITE(sc, WMREG_EECD, reg);
   10626 
   10627 		/*
   10628 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10629 		 * and Xen.
   10630 		 *
    10631 		 * We use this workaround only for the 82540 because qemu's
    10632 		 * e1000 acts as an 82540.
   10633 		 */
   10634 		if (sc->sc_type == WM_T_82540) {
   10635 			reg |= EECD_SK;
   10636 			CSR_WRITE(sc, WMREG_EECD, reg);
   10637 			reg &= ~EECD_SK;
   10638 			CSR_WRITE(sc, WMREG_EECD, reg);
   10639 			CSR_WRITE_FLUSH(sc);
   10640 			delay(2);
   10641 		}
   10642 		/* XXX: end of workaround */
   10643 
   10644 		/* Set CHIP SELECT. */
   10645 		reg |= EECD_CS;
   10646 		CSR_WRITE(sc, WMREG_EECD, reg);
   10647 		CSR_WRITE_FLUSH(sc);
   10648 		delay(2);
   10649 
   10650 		/* Shift in the READ command. */
   10651 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10652 
   10653 		/* Shift in address. */
   10654 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10655 
   10656 		/* Shift out the data. */
   10657 		wm_eeprom_recvbits(sc, &val, 16);
   10658 		data[i] = val & 0xffff;
   10659 
   10660 		/* Clear CHIP SELECT. */
   10661 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10662 		CSR_WRITE(sc, WMREG_EECD, reg);
   10663 		CSR_WRITE_FLUSH(sc);
   10664 		delay(2);
   10665 	}
   10666 
   10667 	return 0;
   10668 }
   10669 
   10670 /* SPI */
   10671 
   10672 /*
   10673  * Set SPI and FLASH related information from the EECD register.
   10674  * For 82541 and 82547, the word size is taken from EEPROM.
   10675  */
   10676 static int
   10677 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10678 {
   10679 	int size;
   10680 	uint32_t reg;
   10681 	uint16_t data;
   10682 
   10683 	reg = CSR_READ(sc, WMREG_EECD);
   10684 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10685 
   10686 	/* Read the size of NVM from EECD by default */
   10687 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10688 	switch (sc->sc_type) {
   10689 	case WM_T_82541:
   10690 	case WM_T_82541_2:
   10691 	case WM_T_82547:
   10692 	case WM_T_82547_2:
   10693 		/* Set dummy value to access EEPROM */
   10694 		sc->sc_nvm_wordsize = 64;
   10695 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10696 		reg = data;
   10697 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10698 		if (size == 0)
   10699 			size = 6; /* 64 word size */
   10700 		else
   10701 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10702 		break;
   10703 	case WM_T_80003:
   10704 	case WM_T_82571:
   10705 	case WM_T_82572:
   10706 	case WM_T_82573: /* SPI case */
   10707 	case WM_T_82574: /* SPI case */
   10708 	case WM_T_82583: /* SPI case */
   10709 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10710 		if (size > 14)
   10711 			size = 14;
   10712 		break;
   10713 	case WM_T_82575:
   10714 	case WM_T_82576:
   10715 	case WM_T_82580:
   10716 	case WM_T_I350:
   10717 	case WM_T_I354:
   10718 	case WM_T_I210:
   10719 	case WM_T_I211:
   10720 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10721 		if (size > 15)
   10722 			size = 15;
   10723 		break;
   10724 	default:
   10725 		aprint_error_dev(sc->sc_dev,
   10726 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10727 		return -1;
   10728 		break;
   10729 	}
   10730 
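          	/*
          	 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6):
          	 * an EECD size field of 2 becomes size = 8 above, so the
          	 * NVM holds 1 << 8 = 256 16-bit words.
          	 */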
   10731 	sc->sc_nvm_wordsize = 1 << size;
   10732 
   10733 	return 0;
   10734 }
   10735 
   10736 /*
   10737  * wm_nvm_ready_spi:
   10738  *
   10739  *	Wait for a SPI EEPROM to be ready for commands.
   10740  */
   10741 static int
   10742 wm_nvm_ready_spi(struct wm_softc *sc)
   10743 {
   10744 	uint32_t val;
   10745 	int usec;
   10746 
   10747 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10748 		device_xname(sc->sc_dev), __func__));
   10749 
   10750 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10751 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10752 		wm_eeprom_recvbits(sc, &val, 8);
   10753 		if ((val & SPI_SR_RDY) == 0)
   10754 			break;
   10755 	}
   10756 	if (usec >= SPI_MAX_RETRIES) {
    10757 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   10758 		return 1;
   10759 	}
   10760 	return 0;
   10761 }
   10762 
   10763 /*
   10764  * wm_nvm_read_spi:
   10765  *
    10766  *	Read word(s) from the EEPROM using the SPI protocol.
   10767  */
   10768 static int
   10769 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10770 {
   10771 	uint32_t reg, val;
   10772 	int i;
   10773 	uint8_t opc;
   10774 
   10775 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10776 		device_xname(sc->sc_dev), __func__));
   10777 
   10778 	/* Clear SK and CS. */
   10779 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10780 	CSR_WRITE(sc, WMREG_EECD, reg);
   10781 	CSR_WRITE_FLUSH(sc);
   10782 	delay(2);
   10783 
   10784 	if (wm_nvm_ready_spi(sc))
   10785 		return 1;
   10786 
   10787 	/* Toggle CS to flush commands. */
   10788 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10789 	CSR_WRITE_FLUSH(sc);
   10790 	delay(2);
   10791 	CSR_WRITE(sc, WMREG_EECD, reg);
   10792 	CSR_WRITE_FLUSH(sc);
   10793 	delay(2);
   10794 
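          	/*
          	 * The word index becomes a byte address below (word << 1),
          	 * so with 8 address bits any word at or above 128 needs a
          	 * 9th address bit, carried in the opcode as SPI_OPC_A8.
          	 */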
   10795 	opc = SPI_OPC_READ;
   10796 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10797 		opc |= SPI_OPC_A8;
   10798 
   10799 	wm_eeprom_sendbits(sc, opc, 8);
   10800 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10801 
   10802 	for (i = 0; i < wordcnt; i++) {
   10803 		wm_eeprom_recvbits(sc, &val, 16);
   10804 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10805 	}
   10806 
   10807 	/* Raise CS and clear SK. */
   10808 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10809 	CSR_WRITE(sc, WMREG_EECD, reg);
   10810 	CSR_WRITE_FLUSH(sc);
   10811 	delay(2);
   10812 
   10813 	return 0;
   10814 }
   10815 
   10816 /* Using with EERD */
   10817 
   10818 static int
   10819 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10820 {
   10821 	uint32_t attempts = 100000;
   10822 	uint32_t i, reg = 0;
   10823 	int32_t done = -1;
   10824 
   10825 	for (i = 0; i < attempts; i++) {
   10826 		reg = CSR_READ(sc, rw);
   10827 
   10828 		if (reg & EERD_DONE) {
   10829 			done = 0;
   10830 			break;
   10831 		}
   10832 		delay(5);
   10833 	}
   10834 
   10835 	return done;
   10836 }
   10837 
   10838 static int
   10839 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10840     uint16_t *data)
   10841 {
   10842 	int i, eerd = 0;
   10843 	int error = 0;
   10844 
   10845 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10846 		device_xname(sc->sc_dev), __func__));
   10847 
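          	/*
          	 * EERD protocol: write the word address together with
          	 * EERD_START, poll until the done bit is set, then pull the
          	 * word out of the data field of the same register.
          	 */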
   10848 	for (i = 0; i < wordcnt; i++) {
   10849 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10850 
   10851 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10852 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10853 		if (error != 0)
   10854 			break;
   10855 
   10856 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10857 	}
   10858 
   10859 	return error;
   10860 }
   10861 
   10862 /* Flash */
   10863 
   10864 static int
   10865 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10866 {
   10867 	uint32_t eecd;
   10868 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10869 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10870 	uint8_t sig_byte = 0;
   10871 
   10872 	switch (sc->sc_type) {
   10873 	case WM_T_PCH_SPT:
   10874 		/*
   10875 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10876 		 * sector valid bits from the NVM.
   10877 		 */
   10878 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10879 		if ((*bank == 0) || (*bank == 1)) {
   10880 			aprint_error_dev(sc->sc_dev,
   10881 			    "%s: no valid NVM bank present (%u)\n", __func__,
   10882 				*bank);
   10883 			return -1;
   10884 		} else {
   10885 			*bank = *bank - 2;
   10886 			return 0;
   10887 		}
   10888 	case WM_T_ICH8:
   10889 	case WM_T_ICH9:
   10890 		eecd = CSR_READ(sc, WMREG_EECD);
   10891 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10892 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10893 			return 0;
   10894 		}
   10895 		/* FALLTHROUGH */
   10896 	default:
   10897 		/* Default to 0 */
   10898 		*bank = 0;
   10899 
   10900 		/* Check bank 0 */
   10901 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10902 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10903 			*bank = 0;
   10904 			return 0;
   10905 		}
   10906 
   10907 		/* Check bank 1 */
   10908 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10909 		    &sig_byte);
   10910 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10911 			*bank = 1;
   10912 			return 0;
   10913 		}
   10914 	}
   10915 
   10916 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10917 		device_xname(sc->sc_dev)));
   10918 	return -1;
   10919 }
   10920 
   10921 /******************************************************************************
   10922  * This function does initial flash setup so that a new read/write/erase cycle
   10923  * can be started.
   10924  *
   10925  * sc - The pointer to the hw structure
   10926  ****************************************************************************/
   10927 static int32_t
   10928 wm_ich8_cycle_init(struct wm_softc *sc)
   10929 {
   10930 	uint16_t hsfsts;
   10931 	int32_t error = 1;
   10932 	int32_t i     = 0;
   10933 
   10934 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10935 
    10936 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   10937 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10938 		return error;
   10939 	}
   10940 
    10941 	/* Clear FCERR in HW status by writing a 1 */
    10942 	/* Clear DAEL in HW status by writing a 1 */
   10943 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10944 
   10945 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10946 
    10947 	/*
    10948 	 * Before starting a new cycle we should either have a hardware
    10949 	 * "SPI cycle in progress" bit to check against, or the FDONE
    10950 	 * bit should read as 1 after a hardware reset so that it can
    10951 	 * serve as an indication of whether a cycle is in progress or
    10952 	 * has completed.  We should also have a software semaphore
    10953 	 * mechanism guarding FDONE or the in-progress bit, so that
    10954 	 * accesses to those bits by two threads are serialized and two
    10955 	 * threads don't start a cycle at the same time.
    10956 	 */
   10957 
   10958 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10959 		/*
   10960 		 * There is no cycle running at present, so we can start a
   10961 		 * cycle
   10962 		 */
   10963 
   10964 		/* Begin by setting Flash Cycle Done. */
   10965 		hsfsts |= HSFSTS_DONE;
   10966 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10967 		error = 0;
   10968 	} else {
   10969 		/*
    10970 		 * Otherwise poll for a while so the current cycle has a
    10971 		 * chance to end before giving up.
   10972 		 */
   10973 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10974 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10975 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10976 				error = 0;
   10977 				break;
   10978 			}
   10979 			delay(1);
   10980 		}
   10981 		if (error == 0) {
   10982 			/*
    10983 			 * The previous cycle finished while we polled; now
    10984 			 * set the Flash Cycle Done bit.
   10985 			 */
   10986 			hsfsts |= HSFSTS_DONE;
   10987 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10988 		}
   10989 	}
   10990 	return error;
   10991 }
   10992 
   10993 /******************************************************************************
   10994  * This function starts a flash cycle and waits for its completion
   10995  *
   10996  * sc - The pointer to the hw structure
   10997  ****************************************************************************/
   10998 static int32_t
   10999 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11000 {
   11001 	uint16_t hsflctl;
   11002 	uint16_t hsfsts;
   11003 	int32_t error = 1;
   11004 	uint32_t i = 0;
   11005 
   11006 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11007 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11008 	hsflctl |= HSFCTL_GO;
   11009 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11010 
   11011 	/* Wait till FDONE bit is set to 1 */
   11012 	do {
   11013 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11014 		if (hsfsts & HSFSTS_DONE)
   11015 			break;
   11016 		delay(1);
   11017 		i++;
   11018 	} while (i < timeout);
    11019 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   11020 		error = 0;
   11021 
   11022 	return error;
   11023 }
   11024 
   11025 /******************************************************************************
   11026  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11027  *
   11028  * sc - The pointer to the hw structure
   11029  * index - The index of the byte or word to read.
   11030  * size - Size of data to read, 1=byte 2=word, 4=dword
   11031  * data - Pointer to the word to store the value read.
   11032  *****************************************************************************/
   11033 static int32_t
   11034 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11035     uint32_t size, uint32_t *data)
   11036 {
   11037 	uint16_t hsfsts;
   11038 	uint16_t hsflctl;
   11039 	uint32_t flash_linear_address;
   11040 	uint32_t flash_data = 0;
   11041 	int32_t error = 1;
   11042 	int32_t count = 0;
   11043 
    11044 	if (size < 1 || size > 4 || data == NULL ||
   11045 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11046 		return error;
   11047 
   11048 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11049 	    sc->sc_ich8_flash_base;
   11050 
   11051 	do {
   11052 		delay(1);
   11053 		/* Steps */
   11054 		error = wm_ich8_cycle_init(sc);
   11055 		if (error)
   11056 			break;
   11057 
   11058 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11059 		/* The byte count field holds size - 1: 0 = 1 byte, 1 = 2, 3 = 4 */
   11060 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11061 		    & HSFCTL_BCOUNT_MASK;
   11062 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11063 		if (sc->sc_type == WM_T_PCH_SPT) {
   11064 			/*
    11065 			 * In SPT, this register is in LAN memory space, not
    11066 			 * flash.  Therefore, only 32 bit access is supported.
   11067 			 */
   11068 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11069 			    (uint32_t)hsflctl);
   11070 		} else
   11071 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11072 
   11073 		/*
   11074 		 * Write the last 24 bits of index into Flash Linear address
   11075 		 * field in Flash Address
   11076 		 */
   11077 		/* TODO: TBD maybe check the index against the size of flash */
   11078 
   11079 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11080 
   11081 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11082 
   11083 		/*
    11084 		 * If FCERR is set, clear it and retry the whole sequence
    11085 		 * up to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read
    11086 		 * the result out of Flash Data0, least significant byte
    11087 		 * first.
   11088 		 */
   11089 		if (error == 0) {
   11090 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11091 			if (size == 1)
   11092 				*data = (uint8_t)(flash_data & 0x000000FF);
   11093 			else if (size == 2)
   11094 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11095 			else if (size == 4)
   11096 				*data = (uint32_t)flash_data;
   11097 			break;
   11098 		} else {
   11099 			/*
   11100 			 * If we've gotten here, then things are probably
   11101 			 * completely hosed, but if the error condition is
   11102 			 * detected, it won't hurt to give it another try...
   11103 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11104 			 */
   11105 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11106 			if (hsfsts & HSFSTS_ERR) {
   11107 				/* Repeat for some time before giving up. */
   11108 				continue;
   11109 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11110 				break;
   11111 		}
   11112 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11113 
   11114 	return error;
   11115 }
   11116 
   11117 /******************************************************************************
   11118  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11119  *
   11120  * sc - pointer to wm_hw structure
   11121  * index - The index of the byte to read.
   11122  * data - Pointer to a byte to store the value read.
   11123  *****************************************************************************/
   11124 static int32_t
   11125 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11126 {
   11127 	int32_t status;
   11128 	uint32_t word = 0;
   11129 
   11130 	status = wm_read_ich8_data(sc, index, 1, &word);
   11131 	if (status == 0)
   11132 		*data = (uint8_t)word;
   11133 	else
   11134 		*data = 0;
   11135 
   11136 	return status;
   11137 }
   11138 
   11139 /******************************************************************************
   11140  * Reads a word from the NVM using the ICH8 flash access registers.
   11141  *
   11142  * sc - pointer to wm_hw structure
   11143  * index - The starting byte index of the word to read.
   11144  * data - Pointer to a word to store the value read.
   11145  *****************************************************************************/
   11146 static int32_t
   11147 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11148 {
   11149 	int32_t status;
   11150 	uint32_t word = 0;
   11151 
   11152 	status = wm_read_ich8_data(sc, index, 2, &word);
   11153 	if (status == 0)
   11154 		*data = (uint16_t)word;
   11155 	else
   11156 		*data = 0;
   11157 
   11158 	return status;
   11159 }
   11160 
   11161 /******************************************************************************
   11162  * Reads a dword from the NVM using the ICH8 flash access registers.
   11163  *
   11164  * sc - pointer to wm_hw structure
   11165  * index - The starting byte index of the word to read.
   11166  * data - Pointer to a word to store the value read.
   11167  *****************************************************************************/
   11168 static int32_t
   11169 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11170 {
   11171 	int32_t status;
   11172 
   11173 	status = wm_read_ich8_data(sc, index, 4, data);
   11174 	return status;
   11175 }
   11176 
   11177 /******************************************************************************
   11178  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11179  * register.
   11180  *
   11181  * sc - Struct containing variables accessed by shared code
   11182  * offset - offset of word in the EEPROM to read
   11183  * data - word read from the EEPROM
   11184  * words - number of words to read
   11185  *****************************************************************************/
   11186 static int
   11187 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11188 {
   11189 	int32_t  error = 0;
   11190 	uint32_t flash_bank = 0;
   11191 	uint32_t act_offset = 0;
   11192 	uint32_t bank_offset = 0;
   11193 	uint16_t word = 0;
   11194 	uint16_t i = 0;
   11195 
   11196 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11197 		device_xname(sc->sc_dev), __func__));
   11198 
   11199 	/*
   11200 	 * We need to know which is the valid flash bank.  In the event
   11201 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11202 	 * managing flash_bank.  So it cannot be trusted and needs
   11203 	 * to be updated with each read.
   11204 	 */
   11205 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11206 	if (error) {
   11207 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11208 			device_xname(sc->sc_dev)));
   11209 		flash_bank = 0;
   11210 	}
   11211 
   11212 	/*
   11213 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11214 	 * size
   11215 	 */
   11216 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11217 
   11218 	error = wm_get_swfwhw_semaphore(sc);
   11219 	if (error) {
   11220 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11221 		    __func__);
   11222 		return error;
   11223 	}
   11224 
   11225 	for (i = 0; i < words; i++) {
   11226 		/* The NVM part needs a byte offset, hence * 2 */
   11227 		act_offset = bank_offset + ((offset + i) * 2);
   11228 		error = wm_read_ich8_word(sc, act_offset, &word);
   11229 		if (error) {
   11230 			aprint_error_dev(sc->sc_dev,
   11231 			    "%s: failed to read NVM\n", __func__);
   11232 			break;
   11233 		}
   11234 		data[i] = word;
   11235 	}
   11236 
   11237 	wm_put_swfwhw_semaphore(sc);
   11238 	return error;
   11239 }
   11240 
   11241 /******************************************************************************
   11242  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11243  * register.
   11244  *
   11245  * sc - Struct containing variables accessed by shared code
   11246  * offset - offset of word in the EEPROM to read
   11247  * data - word read from the EEPROM
   11248  * words - number of words to read
   11249  *****************************************************************************/
   11250 static int
   11251 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11252 {
   11253 	int32_t  error = 0;
   11254 	uint32_t flash_bank = 0;
   11255 	uint32_t act_offset = 0;
   11256 	uint32_t bank_offset = 0;
   11257 	uint32_t dword = 0;
   11258 	uint16_t i = 0;
   11259 
   11260 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11261 		device_xname(sc->sc_dev), __func__));
   11262 
   11263 	/*
   11264 	 * We need to know which is the valid flash bank.  In the event
   11265 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11266 	 * managing flash_bank.  So it cannot be trusted and needs
   11267 	 * to be updated with each read.
   11268 	 */
   11269 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11270 	if (error) {
   11271 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11272 			device_xname(sc->sc_dev)));
   11273 		flash_bank = 0;
   11274 	}
   11275 
   11276 	/*
   11277 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11278 	 * size
   11279 	 */
   11280 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11281 
   11282 	error = wm_get_swfwhw_semaphore(sc);
   11283 	if (error) {
   11284 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11285 		    __func__);
   11286 		return error;
   11287 	}
   11288 
   11289 	for (i = 0; i < words; i++) {
   11290 		/* The NVM part needs a byte offset, hence * 2 */
   11291 		act_offset = bank_offset + ((offset + i) * 2);
   11292 		/* but we must read dword aligned, so mask ... */
   11293 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11294 		if (error) {
   11295 			aprint_error_dev(sc->sc_dev,
   11296 			    "%s: failed to read NVM\n", __func__);
   11297 			break;
   11298 		}
   11299 		/* ... and pick out low or high word */
   11300 		if ((act_offset & 0x2) == 0)
   11301 			data[i] = (uint16_t)(dword & 0xFFFF);
   11302 		else
   11303 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11304 	}
   11305 
   11306 	wm_put_swfwhw_semaphore(sc);
   11307 	return error;
   11308 }
   11309 
   11310 /* iNVM */
   11311 
   11312 static int
   11313 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11314 {
    11315 	int32_t  rv = -1;	/* assume "not found" until the word turns up */
   11316 	uint32_t invm_dword;
   11317 	uint16_t i;
   11318 	uint8_t record_type, word_address;
   11319 
   11320 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11321 		device_xname(sc->sc_dev), __func__));
   11322 
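          	/*
          	 * iNVM is a flat array of dwords; each autoload record
          	 * encodes its type and, for word-autoload records, a word
          	 * address and the data.  Fixed-size CSR-autoload and RSA-key
          	 * records are skipped over below.
          	 */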
   11323 	for (i = 0; i < INVM_SIZE; i++) {
   11324 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11325 		/* Get record type */
   11326 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11327 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11328 			break;
   11329 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11330 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11331 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11332 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11333 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11334 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11335 			if (word_address == address) {
   11336 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11337 				rv = 0;
   11338 				break;
   11339 			}
   11340 		}
   11341 	}
   11342 
   11343 	return rv;
   11344 }
   11345 
   11346 static int
   11347 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11348 {
   11349 	int rv = 0;
   11350 	int i;
   11351 
   11352 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11353 		device_xname(sc->sc_dev), __func__));
   11354 
   11355 	for (i = 0; i < words; i++) {
   11356 		switch (offset + i) {
   11357 		case NVM_OFF_MACADDR:
   11358 		case NVM_OFF_MACADDR1:
   11359 		case NVM_OFF_MACADDR2:
   11360 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11361 			if (rv != 0) {
   11362 				data[i] = 0xffff;
   11363 				rv = -1;
   11364 			}
   11365 			break;
		case NVM_OFF_CFG2:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = NVM_INIT_CTRL_2_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_CFG4:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = NVM_INIT_CTRL_4_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_LED_1_CFG:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = NVM_LED_1_CFG_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_LED_0_2_CFG:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = NVM_LED_0_2_CFG_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_ID_LED_SETTINGS:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = ID_LED_RESERVED_FFFF;
				rv = 0;
			}
			break;
		default:
			DPRINTF(WM_DEBUG_NVM,
			    ("NVM word 0x%02x is not mapped.\n", offset + i));
			data[i] = NVM_RESERVED_WORD;
			break;
   11405 			break;
   11406 		}
   11407 	}
   11408 
   11409 	return rv;
   11410 }
   11411 
/* Locking, NVM type detection, checksum validation, version info and read */
   11413 
   11414 /*
   11415  * wm_nvm_acquire:
   11416  *
   11417  *	Perform the EEPROM handshake required on some chips.
   11418  */
   11419 static int
   11420 wm_nvm_acquire(struct wm_softc *sc)
   11421 {
   11422 	uint32_t reg;
   11423 	int x;
   11424 	int ret = 0;
   11425 
   11426 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11427 		device_xname(sc->sc_dev), __func__));
   11428 
   11429 	if (sc->sc_type >= WM_T_ICH8) {
   11430 		ret = wm_get_nvm_ich8lan(sc);
   11431 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11432 		ret = wm_get_swfwhw_semaphore(sc);
   11433 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11434 		/* This will also do wm_get_swsm_semaphore() if needed */
   11435 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11436 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11437 		ret = wm_get_swsm_semaphore(sc);
   11438 	}
   11439 
   11440 	if (ret) {
   11441 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11442 			__func__);
   11443 		return 1;
   11444 	}
   11445 
   11446 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11447 		reg = CSR_READ(sc, WMREG_EECD);
   11448 
   11449 		/* Request EEPROM access. */
   11450 		reg |= EECD_EE_REQ;
   11451 		CSR_WRITE(sc, WMREG_EECD, reg);
   11452 
		/* ... and wait for it to be granted. */
   11454 		for (x = 0; x < 1000; x++) {
   11455 			reg = CSR_READ(sc, WMREG_EECD);
   11456 			if (reg & EECD_EE_GNT)
   11457 				break;
   11458 			delay(5);
   11459 		}
   11460 		if ((reg & EECD_EE_GNT) == 0) {
   11461 			aprint_error_dev(sc->sc_dev,
   11462 			    "could not acquire EEPROM GNT\n");
   11463 			reg &= ~EECD_EE_REQ;
   11464 			CSR_WRITE(sc, WMREG_EECD, reg);
   11465 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11466 				wm_put_swfwhw_semaphore(sc);
   11467 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11468 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11469 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11470 				wm_put_swsm_semaphore(sc);
   11471 			return 1;
   11472 		}
   11473 	}
   11474 
   11475 	return 0;
   11476 }
   11477 
   11478 /*
   11479  * wm_nvm_release:
   11480  *
   11481  *	Release the EEPROM mutex.
   11482  */
   11483 static void
   11484 wm_nvm_release(struct wm_softc *sc)
   11485 {
   11486 	uint32_t reg;
   11487 
   11488 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11489 		device_xname(sc->sc_dev), __func__));
   11490 
   11491 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11492 		reg = CSR_READ(sc, WMREG_EECD);
   11493 		reg &= ~EECD_EE_REQ;
   11494 		CSR_WRITE(sc, WMREG_EECD, reg);
   11495 	}
   11496 
   11497 	if (sc->sc_type >= WM_T_ICH8) {
   11498 		wm_put_nvm_ich8lan(sc);
   11499 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11500 		wm_put_swfwhw_semaphore(sc);
   11501 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11502 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11503 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11504 		wm_put_swsm_semaphore(sc);
   11505 }
   11506 
   11507 static int
   11508 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11509 {
   11510 	uint32_t eecd = 0;
   11511 
   11512 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11513 	    || sc->sc_type == WM_T_82583) {
   11514 		eecd = CSR_READ(sc, WMREG_EECD);
   11515 
   11516 		/* Isolate bits 15 & 16 */
   11517 		eecd = ((eecd >> 15) & 0x03);
   11518 
   11519 		/* If both bits are set, device is Flash type */
   11520 		if (eecd == 0x03)
   11521 			return 0;
   11522 	}
   11523 	return 1;
   11524 }
   11525 
   11526 static int
   11527 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11528 {
   11529 	uint32_t eec;
   11530 
   11531 	eec = CSR_READ(sc, WMREG_EEC);
   11532 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11533 		return 1;
   11534 
   11535 	return 0;
   11536 }
   11537 
   11538 /*
   11539  * wm_nvm_validate_checksum
   11540  *
 * The checksum is valid when the 16-bit sum of the first 64 (16-bit)
 * words equals NVM_CHECKSUM (0xBABA).
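 *
 * For example, if words 0 through 62 sum to 0x1234, then the checksum
 * word (word 63) must contain 0xBABA - 0x1234 = 0xA886.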
   11542  */
   11543 static int
   11544 wm_nvm_validate_checksum(struct wm_softc *sc)
   11545 {
   11546 	uint16_t checksum;
   11547 	uint16_t eeprom_data;
   11548 #ifdef WM_DEBUG
   11549 	uint16_t csum_wordaddr, valid_checksum;
   11550 #endif
   11551 	int i;
   11552 
   11553 	checksum = 0;
   11554 
   11555 	/* Don't check for I211 */
   11556 	if (sc->sc_type == WM_T_I211)
   11557 		return 0;
   11558 
   11559 #ifdef WM_DEBUG
   11560 	if (sc->sc_type == WM_T_PCH_LPT) {
   11561 		csum_wordaddr = NVM_OFF_COMPAT;
   11562 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11563 	} else {
   11564 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11565 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11566 	}
   11567 
   11568 	/* Dump EEPROM image for debug */
   11569 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11570 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11571 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11572 		/* XXX PCH_SPT? */
   11573 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11574 		if ((eeprom_data & valid_checksum) == 0) {
   11575 			DPRINTF(WM_DEBUG_NVM,
   11576 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11577 				device_xname(sc->sc_dev), eeprom_data,
   11578 				    valid_checksum));
   11579 		}
   11580 	}
   11581 
   11582 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11583 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11584 		for (i = 0; i < NVM_SIZE; i++) {
   11585 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11586 				printf("XXXX ");
   11587 			else
   11588 				printf("%04hx ", eeprom_data);
   11589 			if (i % 8 == 7)
   11590 				printf("\n");
   11591 		}
   11592 	}
   11593 
   11594 #endif /* WM_DEBUG */
   11595 
   11596 	for (i = 0; i < NVM_SIZE; i++) {
   11597 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11598 			return 1;
   11599 		checksum += eeprom_data;
   11600 	}
   11601 
   11602 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11603 #ifdef WM_DEBUG
   11604 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11605 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11606 #endif
   11607 	}
   11608 
   11609 	return 0;
   11610 }
   11611 
   11612 static void
   11613 wm_nvm_version_invm(struct wm_softc *sc)
   11614 {
   11615 	uint32_t dword;
   11616 
   11617 	/*
	 * Linux's code to decode the version is very strange, so we
	 * don't follow that algorithm and just use word 61 as the
	 * documentation describes.  It may not be perfect, though...
   11621 	 *
   11622 	 * Example:
   11623 	 *
   11624 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11625 	 */
   11626 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11627 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11628 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11629 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11630 }
   11631 
   11632 static void
   11633 wm_nvm_version(struct wm_softc *sc)
   11634 {
   11635 	uint16_t major, minor, build, patch;
   11636 	uint16_t uid0, uid1;
   11637 	uint16_t nvm_data;
   11638 	uint16_t off;
   11639 	bool check_version = false;
   11640 	bool check_optionrom = false;
   11641 	bool have_build = false;
   11642 
   11643 	/*
   11644 	 * Version format:
   11645 	 *
   11646 	 * XYYZ
   11647 	 * X0YZ
   11648 	 * X0YY
   11649 	 *
   11650 	 * Example:
   11651 	 *
   11652 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11653 	 *	82571	0x50a6	5.10.6?
   11654 	 *	82572	0x506a	5.6.10?
   11655 	 *	82572EI	0x5069	5.6.9?
   11656 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11657 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
   11659 	 */
   11660 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11661 	switch (sc->sc_type) {
   11662 	case WM_T_82571:
   11663 	case WM_T_82572:
   11664 	case WM_T_82574:
   11665 	case WM_T_82583:
   11666 		check_version = true;
   11667 		check_optionrom = true;
   11668 		have_build = true;
   11669 		break;
   11670 	case WM_T_82575:
   11671 	case WM_T_82576:
   11672 	case WM_T_82580:
   11673 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11674 			check_version = true;
   11675 		break;
   11676 	case WM_T_I211:
   11677 		wm_nvm_version_invm(sc);
   11678 		goto printver;
   11679 	case WM_T_I210:
   11680 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11681 			wm_nvm_version_invm(sc);
   11682 			goto printver;
   11683 		}
   11684 		/* FALLTHROUGH */
   11685 	case WM_T_I350:
   11686 	case WM_T_I354:
   11687 		check_version = true;
   11688 		check_optionrom = true;
   11689 		break;
   11690 	default:
   11691 		return;
   11692 	}
   11693 	if (check_version) {
   11694 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11695 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11696 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11697 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11698 			build = nvm_data & NVM_BUILD_MASK;
   11699 			have_build = true;
   11700 		} else
   11701 			minor = nvm_data & 0x00ff;
   11702 
		/* The minor number is BCD-encoded (e.g. 0x10 means 10). */
   11704 		minor = (minor / 16) * 10 + (minor % 16);
   11705 		sc->sc_nvm_ver_major = major;
   11706 		sc->sc_nvm_ver_minor = minor;
   11707 
   11708 printver:
   11709 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11710 		    sc->sc_nvm_ver_minor);
   11711 		if (have_build) {
   11712 			sc->sc_nvm_ver_build = build;
   11713 			aprint_verbose(".%d", build);
   11714 		}
   11715 	}
   11716 	if (check_optionrom) {
   11717 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11718 		/* Option ROM Version */
   11719 		if ((off != 0x0000) && (off != 0xffff)) {
   11720 			off += NVM_COMBO_VER_OFF;
   11721 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11722 			wm_nvm_read(sc, off, 1, &uid0);
   11723 			if ((uid0 != 0) && (uid0 != 0xffff)
   11724 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11725 				/* 16bits */
   11726 				major = uid0 >> 8;
   11727 				build = (uid0 << 8) | (uid1 >> 8);
   11728 				patch = uid1 & 0x00ff;
   11729 				aprint_verbose(", option ROM Version %d.%d.%d",
   11730 				    major, build, patch);
   11731 			}
   11732 		}
   11733 	}
   11734 
   11735 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11736 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11737 }
   11738 
   11739 /*
   11740  * wm_nvm_read:
   11741  *
   11742  *	Read data from the serial EEPROM.
   11743  */
   11744 static int
   11745 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11746 {
   11747 	int rv;
   11748 
   11749 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11750 		device_xname(sc->sc_dev), __func__));
   11751 
   11752 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11753 		return 1;
   11754 
   11755 	if (wm_nvm_acquire(sc))
   11756 		return 1;
   11757 
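	/* Dispatch on the NVM access method this chip uses. */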
   11758 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11759 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11760 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11761 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11762 	else if (sc->sc_type == WM_T_PCH_SPT)
   11763 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11764 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11765 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11766 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11767 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11768 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11769 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11770 	else
   11771 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11772 
   11773 	wm_nvm_release(sc);
   11774 	return rv;
   11775 }
   11776 
   11777 /*
   11778  * Hardware semaphores.
 * Very complex...
   11780  */
   11781 
   11782 static int
   11783 wm_get_null(struct wm_softc *sc)
   11784 {
   11785 
   11786 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11787 		device_xname(sc->sc_dev), __func__));
   11788 	return 0;
   11789 }
   11790 
   11791 static void
   11792 wm_put_null(struct wm_softc *sc)
   11793 {
   11794 
   11795 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11796 		device_xname(sc->sc_dev), __func__));
   11797 	return;
   11798 }
   11799 
   11800 /*
   11801  * Get hardware semaphore.
   11802  * Same as e1000_get_hw_semaphore_generic()
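 *
 * The SWSM register implements a two-stage handshake: SMBI is the
 * software semaphore proper (reading SWSM while it is clear acquires
 * it), and SWESMBI arbitrates between software and firmware (it is
 * acquired by writing it and reading it back as set).  Take both, in
 * that order.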
   11803  */
   11804 static int
   11805 wm_get_swsm_semaphore(struct wm_softc *sc)
   11806 {
   11807 	int32_t timeout;
   11808 	uint32_t swsm;
   11809 
   11810 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11811 		device_xname(sc->sc_dev), __func__));
   11812 	KASSERT(sc->sc_nvm_wordsize > 0);
   11813 
   11814 	/* Get the SW semaphore. */
   11815 	timeout = sc->sc_nvm_wordsize + 1;
   11816 	while (timeout) {
   11817 		swsm = CSR_READ(sc, WMREG_SWSM);
   11818 
   11819 		if ((swsm & SWSM_SMBI) == 0)
   11820 			break;
   11821 
   11822 		delay(50);
   11823 		timeout--;
   11824 	}
   11825 
   11826 	if (timeout == 0) {
   11827 		aprint_error_dev(sc->sc_dev,
   11828 		    "could not acquire SWSM SMBI\n");
   11829 		return 1;
   11830 	}
   11831 
   11832 	/* Get the FW semaphore. */
   11833 	timeout = sc->sc_nvm_wordsize + 1;
   11834 	while (timeout) {
   11835 		swsm = CSR_READ(sc, WMREG_SWSM);
   11836 		swsm |= SWSM_SWESMBI;
   11837 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11838 		/* If we managed to set the bit we got the semaphore. */
   11839 		swsm = CSR_READ(sc, WMREG_SWSM);
   11840 		if (swsm & SWSM_SWESMBI)
   11841 			break;
   11842 
   11843 		delay(50);
   11844 		timeout--;
   11845 	}
   11846 
   11847 	if (timeout == 0) {
   11848 		aprint_error_dev(sc->sc_dev,
   11849 		    "could not acquire SWSM SWESMBI\n");
   11850 		/* Release semaphores */
   11851 		wm_put_swsm_semaphore(sc);
   11852 		return 1;
   11853 	}
   11854 	return 0;
   11855 }
   11856 
   11857 /*
   11858  * Put hardware semaphore.
   11859  * Same as e1000_put_hw_semaphore_generic()
   11860  */
   11861 static void
   11862 wm_put_swsm_semaphore(struct wm_softc *sc)
   11863 {
   11864 	uint32_t swsm;
   11865 
   11866 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11867 		device_xname(sc->sc_dev), __func__));
   11868 
   11869 	swsm = CSR_READ(sc, WMREG_SWSM);
   11870 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11871 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11872 }
   11873 
   11874 /*
   11875  * Get SW/FW semaphore.
   11876  * Same as e1000_acquire_swfw_sync_82575().
   11877  */
   11878 static int
   11879 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11880 {
   11881 	uint32_t swfw_sync;
   11882 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11883 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
   11885 
   11886 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11887 		device_xname(sc->sc_dev), __func__));
   11888 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11889 
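	/*
	 * Each shared resource has one software-ownership bit and one
	 * firmware-ownership bit in SW_FW_SYNC.  We may claim a resource
	 * only while both bits are clear; access to SW_FW_SYNC itself is
	 * serialized by the SWSM semaphore.
	 */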
   11890 	for (timeout = 0; timeout < 200; timeout++) {
   11891 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11892 			if (wm_get_swsm_semaphore(sc)) {
   11893 				aprint_error_dev(sc->sc_dev,
   11894 				    "%s: failed to get semaphore\n",
   11895 				    __func__);
   11896 				return 1;
   11897 			}
   11898 		}
   11899 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11900 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11901 			swfw_sync |= swmask;
   11902 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11903 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11904 				wm_put_swsm_semaphore(sc);
   11905 			return 0;
   11906 		}
   11907 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11908 			wm_put_swsm_semaphore(sc);
   11909 		delay(5000);
   11910 	}
   11911 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11912 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11913 	return 1;
   11914 }
   11915 
   11916 static void
   11917 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11918 {
   11919 	uint32_t swfw_sync;
   11920 
   11921 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11922 		device_xname(sc->sc_dev), __func__));
   11923 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11924 
   11925 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11926 		while (wm_get_swsm_semaphore(sc) != 0)
   11927 			continue;
   11928 	}
   11929 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11930 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11931 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11932 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11933 		wm_put_swsm_semaphore(sc);
   11934 }
   11935 
   11936 static int
   11937 wm_get_phy_82575(struct wm_softc *sc)
   11938 {
   11939 
   11940 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11941 		device_xname(sc->sc_dev), __func__));
   11942 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11943 }
   11944 
   11945 static void
   11946 wm_put_phy_82575(struct wm_softc *sc)
   11947 {
   11948 
   11949 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11950 		device_xname(sc->sc_dev), __func__));
   11951 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11952 }
   11953 
   11954 static int
   11955 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11956 {
   11957 	uint32_t ext_ctrl;
	int timeout;
   11959 
   11960 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11961 		device_xname(sc->sc_dev), __func__));
   11962 
   11963 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
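	/*
	 * Try to set the software ownership bit, then read it back; the
	 * bit sticks only while firmware does not own the MDIO resource.
	 */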
   11964 	for (timeout = 0; timeout < 200; timeout++) {
   11965 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11966 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11967 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11968 
   11969 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11970 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11971 			return 0;
   11972 		delay(5000);
   11973 	}
   11974 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11975 	    device_xname(sc->sc_dev), ext_ctrl);
   11976 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11977 	return 1;
   11978 }
   11979 
   11980 static void
   11981 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11982 {
   11983 	uint32_t ext_ctrl;
   11984 
   11985 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11986 		device_xname(sc->sc_dev), __func__));
   11987 
   11988 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11989 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11990 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11991 
   11992 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11993 }
   11994 
   11995 static int
   11996 wm_get_swflag_ich8lan(struct wm_softc *sc)
   11997 {
   11998 	uint32_t ext_ctrl;
   11999 	int timeout;
   12000 
   12001 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12002 		device_xname(sc->sc_dev), __func__));
   12003 	mutex_enter(sc->sc_ich_phymtx);
   12004 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12005 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12006 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12007 			break;
   12008 		delay(1000);
   12009 	}
   12010 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12011 		printf("%s: SW has already locked the resource\n",
   12012 		    device_xname(sc->sc_dev));
   12013 		goto out;
   12014 	}
   12015 
   12016 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12017 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12018 	for (timeout = 0; timeout < 1000; timeout++) {
   12019 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12020 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12021 			break;
   12022 		delay(1000);
   12023 	}
   12024 	if (timeout >= 1000) {
   12025 		printf("%s: failed to acquire semaphore\n",
   12026 		    device_xname(sc->sc_dev));
   12027 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12028 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12029 		goto out;
   12030 	}
   12031 	return 0;
   12032 
   12033 out:
   12034 	mutex_exit(sc->sc_ich_phymtx);
   12035 	return 1;
   12036 }
   12037 
   12038 static void
   12039 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12040 {
   12041 	uint32_t ext_ctrl;
   12042 
   12043 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12044 		device_xname(sc->sc_dev), __func__));
   12045 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12046 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12047 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12048 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12049 	} else {
   12050 		printf("%s: Semaphore unexpectedly released\n",
   12051 		    device_xname(sc->sc_dev));
   12052 	}
   12053 
   12054 	mutex_exit(sc->sc_ich_phymtx);
   12055 }
   12056 
   12057 static int
   12058 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12059 {
   12060 
   12061 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12062 		device_xname(sc->sc_dev), __func__));
   12063 	mutex_enter(sc->sc_ich_nvmmtx);
   12064 
   12065 	return 0;
   12066 }
   12067 
   12068 static void
   12069 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12070 {
   12071 
   12072 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12073 		device_xname(sc->sc_dev), __func__));
   12074 	mutex_exit(sc->sc_ich_nvmmtx);
   12075 }
   12076 
   12077 static int
   12078 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12079 {
   12080 	int i = 0;
   12081 	uint32_t reg;
   12082 
   12083 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12084 		device_xname(sc->sc_dev), __func__));
   12085 
   12086 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12087 	do {
   12088 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12089 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12090 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12091 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12092 			break;
   12093 		delay(2*1000);
   12094 		i++;
   12095 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12096 
   12097 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12098 		wm_put_hw_semaphore_82573(sc);
   12099 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12100 		    device_xname(sc->sc_dev));
   12101 		return -1;
   12102 	}
   12103 
   12104 	return 0;
   12105 }
   12106 
   12107 static void
   12108 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12109 {
   12110 	uint32_t reg;
   12111 
   12112 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12113 		device_xname(sc->sc_dev), __func__));
   12114 
   12115 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12116 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12117 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12118 }
   12119 
   12120 /*
   12121  * Management mode and power management related subroutines.
   12122  * BMC, AMT, suspend/resume and EEE.
   12123  */
   12124 
   12125 #ifdef WM_WOL
   12126 static int
   12127 wm_check_mng_mode(struct wm_softc *sc)
   12128 {
   12129 	int rv;
   12130 
   12131 	switch (sc->sc_type) {
   12132 	case WM_T_ICH8:
   12133 	case WM_T_ICH9:
   12134 	case WM_T_ICH10:
   12135 	case WM_T_PCH:
   12136 	case WM_T_PCH2:
   12137 	case WM_T_PCH_LPT:
   12138 	case WM_T_PCH_SPT:
   12139 		rv = wm_check_mng_mode_ich8lan(sc);
   12140 		break;
   12141 	case WM_T_82574:
   12142 	case WM_T_82583:
   12143 		rv = wm_check_mng_mode_82574(sc);
   12144 		break;
   12145 	case WM_T_82571:
   12146 	case WM_T_82572:
   12147 	case WM_T_82573:
   12148 	case WM_T_80003:
   12149 		rv = wm_check_mng_mode_generic(sc);
   12150 		break;
   12151 	default:
		/* nothing to do */
   12153 		rv = 0;
   12154 		break;
   12155 	}
   12156 
   12157 	return rv;
   12158 }
   12159 
   12160 static int
   12161 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12162 {
   12163 	uint32_t fwsm;
   12164 
   12165 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12166 
   12167 	if (((fwsm & FWSM_FW_VALID) != 0)
   12168 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12169 		return 1;
   12170 
   12171 	return 0;
   12172 }
   12173 
   12174 static int
   12175 wm_check_mng_mode_82574(struct wm_softc *sc)
   12176 {
   12177 	uint16_t data;
   12178 
   12179 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12180 
   12181 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12182 		return 1;
   12183 
   12184 	return 0;
   12185 }
   12186 
   12187 static int
   12188 wm_check_mng_mode_generic(struct wm_softc *sc)
   12189 {
   12190 	uint32_t fwsm;
   12191 
   12192 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12193 
   12194 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12195 		return 1;
   12196 
   12197 	return 0;
   12198 }
   12199 #endif /* WM_WOL */
   12200 
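/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if the manageability firmware is configured to pass
 *	received management packets through to the host, 0 otherwise.
 */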
   12201 static int
   12202 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12203 {
   12204 	uint32_t manc, fwsm, factps;
   12205 
   12206 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12207 		return 0;
   12208 
   12209 	manc = CSR_READ(sc, WMREG_MANC);
   12210 
   12211 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12212 		device_xname(sc->sc_dev), manc));
   12213 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12214 		return 0;
   12215 
   12216 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12217 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12218 		factps = CSR_READ(sc, WMREG_FACTPS);
   12219 		if (((factps & FACTPS_MNGCG) == 0)
   12220 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12221 			return 1;
   12222 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   12223 		uint16_t data;
   12224 
   12225 		factps = CSR_READ(sc, WMREG_FACTPS);
   12226 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12227 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12228 			device_xname(sc->sc_dev), factps, data));
   12229 		if (((factps & FACTPS_MNGCG) == 0)
   12230 		    && ((data & NVM_CFG2_MNGM_MASK)
   12231 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12232 			return 1;
   12233 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12234 	    && ((manc & MANC_ASF_EN) == 0))
   12235 		return 1;
   12236 
   12237 	return 0;
   12238 }
   12239 
   12240 static bool
   12241 wm_phy_resetisblocked(struct wm_softc *sc)
   12242 {
   12243 	bool blocked = false;
   12244 	uint32_t reg;
   12245 	int i = 0;
   12246 
   12247 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12248 		device_xname(sc->sc_dev), __func__));
   12249 
   12250 	switch (sc->sc_type) {
   12251 	case WM_T_ICH8:
   12252 	case WM_T_ICH9:
   12253 	case WM_T_ICH10:
   12254 	case WM_T_PCH:
   12255 	case WM_T_PCH2:
   12256 	case WM_T_PCH_LPT:
   12257 	case WM_T_PCH_SPT:
   12258 		do {
   12259 			reg = CSR_READ(sc, WMREG_FWSM);
   12260 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12261 				blocked = true;
   12262 				delay(10*1000);
   12263 				continue;
   12264 			}
   12265 			blocked = false;
   12266 		} while (blocked && (i++ < 30));
		return blocked;
   12269 	case WM_T_82571:
   12270 	case WM_T_82572:
   12271 	case WM_T_82573:
   12272 	case WM_T_82574:
   12273 	case WM_T_82583:
   12274 	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   12281 	default:
   12282 		/* no problem */
   12283 		break;
   12284 	}
   12285 
   12286 	return false;
   12287 }
   12288 
   12289 static void
   12290 wm_get_hw_control(struct wm_softc *sc)
   12291 {
   12292 	uint32_t reg;
   12293 
   12294 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12295 		device_xname(sc->sc_dev), __func__));
   12296 
   12297 	if (sc->sc_type == WM_T_82573) {
   12298 		reg = CSR_READ(sc, WMREG_SWSM);
   12299 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12300 	} else if (sc->sc_type >= WM_T_82571) {
   12301 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12302 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12303 	}
   12304 }
   12305 
   12306 static void
   12307 wm_release_hw_control(struct wm_softc *sc)
   12308 {
   12309 	uint32_t reg;
   12310 
   12311 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12312 		device_xname(sc->sc_dev), __func__));
   12313 
   12314 	if (sc->sc_type == WM_T_82573) {
   12315 		reg = CSR_READ(sc, WMREG_SWSM);
   12316 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12317 	} else if (sc->sc_type >= WM_T_82571) {
   12318 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12319 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12320 	}
   12321 }
   12322 
   12323 static void
   12324 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12325 {
   12326 	uint32_t reg;
   12327 
   12328 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12329 		device_xname(sc->sc_dev), __func__));
   12330 
   12331 	if (sc->sc_type < WM_T_PCH2)
   12332 		return;
   12333 
   12334 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12335 
   12336 	if (gate)
   12337 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12338 	else
   12339 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12340 
   12341 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12342 }
   12343 
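/*
 * wm_smbustopci:
 *
 *	Switch the PHY from SMBus to PCIe/MDIO access mode.  On PCH and
 *	newer chips the PHY may come up strapped for SMBus; toggle
 *	LANPHYPC as needed until the PHY becomes accessible.
 */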
   12344 static void
   12345 wm_smbustopci(struct wm_softc *sc)
   12346 {
   12347 	uint32_t fwsm, reg;
   12348 	int rv = 0;
   12349 
   12350 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12351 		device_xname(sc->sc_dev), __func__));
   12352 
   12353 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12354 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12355 
   12356 	/* Disable ULP */
   12357 	wm_ulp_disable(sc);
   12358 
   12359 	/* Acquire PHY semaphore */
   12360 	sc->phy.acquire(sc);
   12361 
   12362 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12363 	switch (sc->sc_type) {
   12364 	case WM_T_PCH_LPT:
   12365 	case WM_T_PCH_SPT:
   12366 		if (wm_phy_is_accessible_pchlan(sc))
   12367 			break;
   12368 
   12369 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12370 		reg |= CTRL_EXT_FORCE_SMBUS;
   12371 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12372 #if 0
   12373 		/* XXX Isn't this required??? */
   12374 		CSR_WRITE_FLUSH(sc);
   12375 #endif
   12376 		delay(50 * 1000);
   12377 		/* FALLTHROUGH */
   12378 	case WM_T_PCH2:
   12379 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12380 			break;
   12381 		/* FALLTHROUGH */
   12382 	case WM_T_PCH:
   12383 		if (sc->sc_type == WM_T_PCH)
   12384 			if ((fwsm & FWSM_FW_VALID) != 0)
   12385 				break;
   12386 
   12387 		if (wm_phy_resetisblocked(sc) == true) {
   12388 			printf("XXX reset is blocked(3)\n");
   12389 			break;
   12390 		}
   12391 
   12392 		wm_toggle_lanphypc_pch_lpt(sc);
   12393 
   12394 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12395 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12396 				break;
   12397 
   12398 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12399 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12400 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12401 
   12402 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12403 				break;
   12404 			rv = -1;
   12405 		}
   12406 		break;
   12407 	default:
   12408 		break;
   12409 	}
   12410 
   12411 	/* Release semaphore */
   12412 	sc->phy.release(sc);
   12413 
   12414 	if (rv == 0) {
   12415 		if (wm_phy_resetisblocked(sc)) {
   12416 			printf("XXX reset is blocked(4)\n");
   12417 			goto out;
   12418 		}
   12419 		wm_reset_phy(sc);
   12420 		if (wm_phy_resetisblocked(sc))
   12421 			printf("XXX reset is blocked(4)\n");
   12422 	}
   12423 
   12424 out:
   12425 	/*
   12426 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12427 	 */
   12428 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12429 		delay(10*1000);
   12430 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12431 	}
   12432 }
   12433 
   12434 static void
   12435 wm_init_manageability(struct wm_softc *sc)
   12436 {
   12437 
   12438 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12439 		device_xname(sc->sc_dev), __func__));
   12440 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12441 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12442 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12443 
   12444 		/* Disable hardware interception of ARP */
   12445 		manc &= ~MANC_ARP_EN;
   12446 
   12447 		/* Enable receiving management packets to the host */
   12448 		if (sc->sc_type >= WM_T_82571) {
   12449 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12451 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12452 		}
   12453 
   12454 		CSR_WRITE(sc, WMREG_MANC, manc);
   12455 	}
   12456 }
   12457 
   12458 static void
   12459 wm_release_manageability(struct wm_softc *sc)
   12460 {
   12461 
   12462 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12463 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12464 
   12465 		manc |= MANC_ARP_EN;
   12466 		if (sc->sc_type >= WM_T_82571)
   12467 			manc &= ~MANC_EN_MNG2HOST;
   12468 
   12469 		CSR_WRITE(sc, WMREG_MANC, manc);
   12470 	}
   12471 }
   12472 
   12473 static void
   12474 wm_get_wakeup(struct wm_softc *sc)
   12475 {
   12476 
   12477 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12478 	switch (sc->sc_type) {
   12479 	case WM_T_82573:
   12480 	case WM_T_82583:
   12481 		sc->sc_flags |= WM_F_HAS_AMT;
   12482 		/* FALLTHROUGH */
   12483 	case WM_T_80003:
   12484 	case WM_T_82575:
   12485 	case WM_T_82576:
   12486 	case WM_T_82580:
   12487 	case WM_T_I350:
   12488 	case WM_T_I354:
   12489 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12490 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12491 		/* FALLTHROUGH */
   12492 	case WM_T_82541:
   12493 	case WM_T_82541_2:
   12494 	case WM_T_82547:
   12495 	case WM_T_82547_2:
   12496 	case WM_T_82571:
   12497 	case WM_T_82572:
   12498 	case WM_T_82574:
   12499 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12500 		break;
   12501 	case WM_T_ICH8:
   12502 	case WM_T_ICH9:
   12503 	case WM_T_ICH10:
   12504 	case WM_T_PCH:
   12505 	case WM_T_PCH2:
   12506 	case WM_T_PCH_LPT:
   12507 	case WM_T_PCH_SPT:
   12508 		sc->sc_flags |= WM_F_HAS_AMT;
   12509 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12510 		break;
   12511 	default:
   12512 		break;
   12513 	}
   12514 
   12515 	/* 1: HAS_MANAGE */
   12516 	if (wm_enable_mng_pass_thru(sc) != 0)
   12517 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12518 
   12519 #ifdef WM_DEBUG
   12520 	printf("\n");
   12521 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12522 		printf("HAS_AMT,");
   12523 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12524 		printf("ARC_SUBSYS_VALID,");
   12525 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12526 		printf("ASF_FIRMWARE_PRES,");
   12527 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12528 		printf("HAS_MANAGE,");
   12529 	printf("\n");
   12530 #endif
   12531 	/*
	 * Note that the WOL flags are set after the EEPROM settings have
	 * been reset.
   12534 	 */
   12535 }
   12536 
   12537 /*
   12538  * Unconfigure Ultra Low Power mode.
   12539  * Only for I217 and newer (see below).
   12540  */
   12541 static void
   12542 wm_ulp_disable(struct wm_softc *sc)
   12543 {
   12544 	uint32_t reg;
   12545 	int i = 0;
   12546 
   12547 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12548 		device_xname(sc->sc_dev), __func__));
   12549 	/* Exclude old devices */
   12550 	if ((sc->sc_type < WM_T_PCH_LPT)
   12551 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12552 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12553 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   12554 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   12555 		return;
   12556 
   12557 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   12558 		/* Request ME un-configure ULP mode in the PHY */
   12559 		reg = CSR_READ(sc, WMREG_H2ME);
   12560 		reg &= ~H2ME_ULP;
   12561 		reg |= H2ME_ENFORCE_SETTINGS;
   12562 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12563 
   12564 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   12565 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   12566 			if (i++ == 30) {
   12567 				printf("%s timed out\n", __func__);
   12568 				return;
   12569 			}
   12570 			delay(10 * 1000);
   12571 		}
   12572 		reg = CSR_READ(sc, WMREG_H2ME);
   12573 		reg &= ~H2ME_ENFORCE_SETTINGS;
   12574 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12575 
   12576 		return;
   12577 	}
   12578 
   12579 	/* Acquire semaphore */
   12580 	sc->phy.acquire(sc);
   12581 
   12582 	/* Toggle LANPHYPC */
   12583 	wm_toggle_lanphypc_pch_lpt(sc);
   12584 
   12585 	/* Unforce SMBus mode in PHY */
   12586 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12587 	if (reg == 0x0000 || reg == 0xffff) {
   12588 		uint32_t reg2;
   12589 
   12590 		printf("%s: Force SMBus first.\n", __func__);
   12591 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   12592 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   12593 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   12594 		delay(50 * 1000);
   12595 
   12596 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12597 	}
   12598 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12599 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   12600 
   12601 	/* Unforce SMBus mode in MAC */
   12602 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12603 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   12604 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12605 
   12606 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   12607 	reg |= HV_PM_CTRL_K1_ENA;
   12608 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   12609 
   12610 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   12611 	reg &= ~(I218_ULP_CONFIG1_IND
   12612 	    | I218_ULP_CONFIG1_STICKY_ULP
   12613 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   12614 	    | I218_ULP_CONFIG1_WOL_HOST
   12615 	    | I218_ULP_CONFIG1_INBAND_EXIT
   12616 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   12617 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   12618 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   12619 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12620 	reg |= I218_ULP_CONFIG1_START;
   12621 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12622 
   12623 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   12624 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   12625 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   12626 
   12627 	/* Release semaphore */
   12628 	sc->phy.release(sc);
   12629 	wm_gmii_reset(sc);
   12630 	delay(50 * 1000);
   12631 }
   12632 
   12633 /* WOL in the newer chipset interfaces (pchlan) */
   12634 static void
   12635 wm_enable_phy_wakeup(struct wm_softc *sc)
   12636 {
   12637 #if 0
   12638 	uint16_t preg;
   12639 
   12640 	/* Copy MAC RARs to PHY RARs */
   12641 
   12642 	/* Copy MAC MTA to PHY MTA */
   12643 
   12644 	/* Configure PHY Rx Control register */
   12645 
   12646 	/* Enable PHY wakeup in MAC register */
   12647 
   12648 	/* Configure and enable PHY wakeup in PHY registers */
   12649 
   12650 	/* Activate PHY wakeup */
   12651 
   12652 	/* XXX */
   12653 #endif
   12654 }
   12655 
   12656 /* Power down workaround on D3 */
   12657 static void
   12658 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12659 {
   12660 	uint32_t reg;
   12661 	int i;
   12662 
   12663 	for (i = 0; i < 2; i++) {
   12664 		/* Disable link */
   12665 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12666 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12667 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12668 
   12669 		/*
   12670 		 * Call gig speed drop workaround on Gig disable before
   12671 		 * accessing any PHY registers
   12672 		 */
   12673 		if (sc->sc_type == WM_T_ICH8)
   12674 			wm_gig_downshift_workaround_ich8lan(sc);
   12675 
   12676 		/* Write VR power-down enable */
   12677 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12678 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12679 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12680 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12681 
   12682 		/* Read it back and test */
   12683 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12684 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12685 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12686 			break;
   12687 
   12688 		/* Issue PHY reset and repeat at most one more time */
   12689 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12690 	}
   12691 }
   12692 
   12693 static void
   12694 wm_enable_wakeup(struct wm_softc *sc)
   12695 {
   12696 	uint32_t reg, pmreg;
   12697 	pcireg_t pmode;
   12698 
   12699 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12700 		device_xname(sc->sc_dev), __func__));
   12701 
   12702 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12703 		&pmreg, NULL) == 0)
   12704 		return;
   12705 
   12706 	/* Advertise the wakeup capability */
   12707 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12708 	    | CTRL_SWDPIN(3));
   12709 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12710 
   12711 	/* ICH workaround */
   12712 	switch (sc->sc_type) {
   12713 	case WM_T_ICH8:
   12714 	case WM_T_ICH9:
   12715 	case WM_T_ICH10:
   12716 	case WM_T_PCH:
   12717 	case WM_T_PCH2:
   12718 	case WM_T_PCH_LPT:
   12719 	case WM_T_PCH_SPT:
   12720 		/* Disable gig during WOL */
   12721 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12722 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12723 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12724 		if (sc->sc_type == WM_T_PCH)
   12725 			wm_gmii_reset(sc);
   12726 
   12727 		/* Power down workaround */
   12728 		if (sc->sc_phytype == WMPHY_82577) {
   12729 			struct mii_softc *child;
   12730 
   12731 			/* Assume that the PHY is copper */
   12732 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12733 			if (child->mii_mpd_rev <= 2)
   12734 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12735 				    (768 << 5) | 25, 0x0444); /* magic num */
   12736 		}
   12737 		break;
   12738 	default:
   12739 		break;
   12740 	}
   12741 
   12742 	/* Keep the laser running on fiber adapters */
   12743 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12744 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12745 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12746 		reg |= CTRL_EXT_SWDPIN(3);
   12747 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12748 	}
   12749 
   12750 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12751 #if 0	/* for the multicast packet */
   12752 	reg |= WUFC_MC;
   12753 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12754 #endif
   12755 
   12756 	if (sc->sc_type >= WM_T_PCH)
   12757 		wm_enable_phy_wakeup(sc);
   12758 	else {
   12759 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   12760 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12761 	}
   12762 
   12763 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12764 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12765 		|| (sc->sc_type == WM_T_PCH2))
   12766 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12767 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12768 
   12769 	/* Request PME */
   12770 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12771 #if 0
   12772 	/* Disable WOL */
   12773 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12774 #else
   12775 	/* For WOL */
   12776 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12777 #endif
   12778 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12779 }
   12780 
   12781 /* LPLU */
   12782 
   12783 static void
   12784 wm_lplu_d0_disable(struct wm_softc *sc)
   12785 {
   12786 	uint32_t reg;
   12787 
   12788 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12789 		device_xname(sc->sc_dev), __func__));
   12790 
   12791 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12792 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12793 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12794 }
   12795 
   12796 static void
   12797 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12798 {
   12799 	uint32_t reg;
   12800 
   12801 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12802 		device_xname(sc->sc_dev), __func__));
   12803 
   12804 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12805 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12806 	reg |= HV_OEM_BITS_ANEGNOW;
   12807 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12808 }
   12809 
   12810 /* EEE */
   12811 
   12812 static void
   12813 wm_set_eee_i350(struct wm_softc *sc)
   12814 {
   12815 	uint32_t ipcnfg, eeer;
   12816 
   12817 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12818 	eeer = CSR_READ(sc, WMREG_EEER);
   12819 
   12820 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12821 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12822 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12823 		    | EEER_LPI_FC);
   12824 	} else {
   12825 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12826 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12827 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12828 		    | EEER_LPI_FC);
   12829 	}
   12830 
   12831 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12832 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12833 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12834 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12835 }
   12836 
   12837 /*
   12838  * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers themselves.
   12840  */
   12841 
   12842 /* Work-around for 82566 Kumeran PCS lock loss */
   12843 static void
   12844 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12845 {
   12846 #if 0
   12847 	int miistatus, active, i;
   12848 	int reg;
   12849 
   12850 	miistatus = sc->sc_mii.mii_media_status;
   12851 
   12852 	/* If the link is not up, do nothing */
   12853 	if ((miistatus & IFM_ACTIVE) == 0)
   12854 		return;
   12855 
   12856 	active = sc->sc_mii.mii_media_active;
   12857 
   12858 	/* Nothing to do if the link is other than 1Gbps */
   12859 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12860 		return;
   12861 
   12862 	for (i = 0; i < 10; i++) {
   12863 		/* read twice */
   12864 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12865 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12866 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12867 			goto out;	/* GOOD! */
   12868 
   12869 		/* Reset the PHY */
   12870 		wm_gmii_reset(sc);
   12871 		delay(5*1000);
   12872 	}
   12873 
   12874 	/* Disable GigE link negotiation */
   12875 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12876 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12877 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12878 
   12879 	/*
   12880 	 * Call gig speed drop workaround on Gig disable before accessing
   12881 	 * any PHY registers.
   12882 	 */
   12883 	wm_gig_downshift_workaround_ich8lan(sc);
   12884 
   12885 out:
   12886 	return;
   12887 #endif
   12888 }
   12889 
/* Workaround: without this, WOL from S5 stops working */
   12891 static void
   12892 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12893 {
   12894 	uint16_t kmrn_reg;
   12895 
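	/*
	 * Pulse the Kumeran near-end loopback bit; per the workaround
	 * this clears the gig downshift state that breaks WOL from S5.
	 */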
   12896 	/* Only for igp3 */
   12897 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12898 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12899 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12900 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12901 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12902 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12903 	}
   12904 }
   12905 
   12906 /*
   12907  * Workaround for pch's PHYs
   12908  * XXX should be moved to new PHY driver?
   12909  */
   12910 static void
   12911 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12912 {
   12913 
   12914 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12915 		device_xname(sc->sc_dev), __func__));
   12916 	KASSERT(sc->sc_type == WM_T_PCH);
   12917 
   12918 	if (sc->sc_phytype == WMPHY_82577)
   12919 		wm_set_mdio_slow_mode_hv(sc);
   12920 
   12921 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12922 
   12923 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   12924 
   12925 	/* 82578 */
   12926 	if (sc->sc_phytype == WMPHY_82578) {
   12927 		struct mii_softc *child;
   12928 
   12929 		/*
   12930 		 * Return registers to default by doing a soft reset then
   12931 		 * writing 0x3140 to the control register
   12932 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   12933 		 */
   12934 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12935 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   12936 			PHY_RESET(child);
   12937 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   12938 			    0x3140);
   12939 		}
   12940 	}
   12941 
   12942 	/* Select page 0 */
   12943 	sc->phy.acquire(sc);
   12944 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12945 	sc->phy.release(sc);
   12946 
   12947 	/*
   12948 	 * Configure the K1 Si workaround during phy reset assuming there is
   12949 	 * link so that it disables K1 if link is in 1Gbps.
   12950 	 */
   12951 	wm_k1_gig_workaround_hv(sc, 1);
   12952 }
   12953 
   12954 static void
   12955 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12956 {
   12957 
   12958 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12959 		device_xname(sc->sc_dev), __func__));
   12960 	KASSERT(sc->sc_type == WM_T_PCH2);
   12961 
   12962 	wm_set_mdio_slow_mode_hv(sc);
   12963 }
   12964 
   12965 static int
   12966 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12967 {
   12968 	int k1_enable = sc->sc_nvm_k1_enabled;
   12969 
   12970 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12971 		device_xname(sc->sc_dev), __func__));
   12972 
   12973 	if (sc->phy.acquire(sc) != 0)
   12974 		return -1;
   12975 
   12976 	if (link) {
   12977 		k1_enable = 0;
   12978 
   12979 		/* Link stall fix for link up */
   12980 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12981 	} else {
   12982 		/* Link stall fix for link down */
   12983 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12984 	}
   12985 
   12986 	wm_configure_k1_ich8lan(sc, k1_enable);
   12987 	sc->phy.release(sc);
   12988 
   12989 	return 0;
   12990 }
   12991 
   12992 static void
   12993 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12994 {
   12995 	uint32_t reg;
   12996 
   12997 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12998 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12999 	    reg | HV_KMRN_MDIO_SLOW);
   13000 }
   13001 
   13002 static void
   13003 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13004 {
   13005 	uint32_t ctrl, ctrl_ext, tmp;
   13006 	uint16_t kmrn_reg;
   13007 
   13008 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13009 
   13010 	if (k1_enable)
   13011 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13012 	else
   13013 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13014 
   13015 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13016 
   13017 	delay(20);
   13018 
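	/*
	 * Temporarily force the MAC speed (with speed-bypass set) and then
	 * restore the original CTRL/CTRL_EXT values; presumably this makes
	 * the Kumeran interface pick up the new K1 setting.
	 */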
   13019 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13020 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13021 
   13022 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13023 	tmp |= CTRL_FRCSPD;
   13024 
   13025 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13026 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13027 	CSR_WRITE_FLUSH(sc);
   13028 	delay(20);
   13029 
   13030 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13031 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13032 	CSR_WRITE_FLUSH(sc);
   13033 	delay(20);
   13034 }
   13035 
/* Special case - the 82575 needs manual init after a reset ... */
   13037 static void
   13038 wm_reset_init_script_82575(struct wm_softc *sc)
   13039 {
   13040 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   13043 	 */
   13044 
   13045 	/* SerDes configuration via SERDESCTRL */
   13046 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13047 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13048 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13049 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13050 
   13051 	/* CCM configuration via CCMCTL register */
   13052 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13053 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13054 
   13055 	/* PCIe lanes configuration */
   13056 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13057 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13058 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13059 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13060 
   13061 	/* PCIe PLL Configuration */
   13062 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13063 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13064 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13065 }
   13066 
   13067 static void
   13068 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13069 {
   13070 	uint32_t reg;
   13071 	uint16_t nvmword;
   13072 	int rv;
   13073 
   13074 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13075 		return;
   13076 
   13077 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13078 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13079 	if (rv != 0) {
   13080 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13081 		    __func__);
   13082 		return;
   13083 	}
   13084 
   13085 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13086 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13087 		reg |= MDICNFG_DEST;
   13088 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13089 		reg |= MDICNFG_COM_MDIO;
   13090 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13091 }
   13092 
#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	int i;
	uint32_t reg;
	uint16_t id1, id2;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	id1 = id2 = 0xffff;
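	/*
	 * Added note: read the ID registers up to twice, since the PHY
	 * may not answer on the first attempt after a reset.  The retry
	 * count of 2 matches Intel's e1000 code; the exact reason for
	 * the first failure is an assumption.
	 */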
	for (i = 0; i < 2; i++) {
		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
		if (MII_INVALIDID(id1))
			continue;
		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
		if (MII_INVALIDID(id2))
			continue;
		break;
	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
		goto out;
	}

	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
		sc->phy.acquire(sc);
	}
	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		printf("%s: XXX PHY ID is still invalid; PHY inaccessible\n",
		    device_xname(sc->sc_dev));
		return false;
	}
out:
	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			/* Unforce SMBus mode in PHY */
			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL);
			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, reg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

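/*
 * Toggle the LANPHYPC pin to power-cycle the PHY.
 *
 * Added summary, inferred from the code below: the PHY configuration
 * counter is first set to 50ms, LANPHYPC is driven low via the CTRL
 * override bit for 1ms, and the function then waits a fixed 50ms
 * (pre-LPT) or polls CTRL_EXT_LPCD in up to 20 5ms steps followed by
 * a 30ms settle delay.
 */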
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

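/*
 * Program PCIe Latency Tolerance Reporting (LTR) for PCH_LPT.
 *
 * Added summary, inferred from the code below: when the link is up,
 * the tolerable latency is derived from how long the Rx packet buffer
 * (PBA_RXA, in KB) can absorb traffic at the current link speed before
 * overflowing.  The result is encoded in the PCIe LTR scale/value
 * format, clamped to the platform maxima advertised in the LTR
 * capability, and written to WMREG_LTRV.
 */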
static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		pcireg_t preg;
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;

		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
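		/*
		 * Added note on units: rxa is in KB (so *1024 gives
		 * bytes), two MTUs of headroom are reserved, *8 converts
		 * to bits, and *1000 followed by the division by the
		 * link speed in Mb/s yields nanoseconds.
		 */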
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else {
			uint32_t status;
			uint16_t speed;

			status = CSR_READ(sc, WMREG_STATUS);
			switch (__SHIFTOUT(status, STATUS_SPEED)) {
			case STATUS_SPEED_10:
				speed = 10;
				break;
			case STATUS_SPEED_100:
				speed = 100;
				break;
			case STATUS_SPEED_1000:
				speed = 1000;
				break;
			default:
				printf("%s: Unknown speed (status = %08x)\n",
				    device_xname(sc->sc_dev), status);
				return -1;
			}
			lat_ns /= speed;
		}
		value = lat_ns;

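		/*
		 * Worked example (added): a tolerance of 100000 ns does
		 * not fit in the 10-bit value field, so the loop below
		 * divides by 2^5 (rounding up) until it does:
		 * 100000 -> 3125 (scale 1) -> 98 (scale 2).  The encoded
		 * result then decodes to 98 * 2^10 ns = 100352 ns, the
		 * smallest representable latency >= 100000 ns.
		 */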
		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			printf("%s: Invalid LTR latency scale %d\n",
			    device_xname(sc->sc_dev), scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc) {
			lat_enc = max_ltr_enc;
		}
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
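/*
 * Added summary, inferred from the code below: if the internal PHY
 * reports an unconfigured PLL (GS40G_PHY_PLL_UNCONF), reset the PHY,
 * write the NVM autoload word with the PLL workaround bits set, bounce
 * the device through D3hot and back to D0, then restore the original
 * word.  Repeat up to WM_MAX_PLL_TRIES times until the PLL configures.
 */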
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

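	/*
	 * Added note: clearing MDICNFG_DEST presumably routes MDIO
	 * accesses to the internal PHY (an inference from the macro
	 * name), so the GS40G PLL register below can be read directly.
	 */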
	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}