/*	$NetBSD: if_wm.c,v 1.475 2017/02/01 08:56:41 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- Tx multiqueue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy-Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.475 2017/02/01 08:56:41 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

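/*
 * Illustrative only, not driver code: DPRINTF() expands to a bare
 * "printf y", so the second argument must carry its own parentheses.
 * A hypothetical call site looks like:
 *
 *	DPRINTF(WM_DEBUG_LINK,
 *	    ("%s: link is up\n", device_xname(sc->sc_dev)));
 */
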
#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts this driver supports.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

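/*
 * Illustrative sketch, not driver code: because WM_NTXDESC(txq) is a
 * power of two, WM_NEXTTX() advances a ring index with a mask instead
 * of a modulo.  Walking the descriptors of one transmit job could
 * look like:
 *
 *	for (i = txs->txs_firstdesc; ; i = WM_NEXTTX(txq, i)) {
 *		... examine txq->txq_descs[i] ...
 *		if (i == txs->txs_lastdesc)
 *			break;
 *	}
 */
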
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
#endif /* WM_EVENT_COUNTERS */
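
/*
 * Illustrative only: for queue 0 of a device named "wm0",
 * WM_Q_EVCNT_ATTACH(txq, txdw, txq, 0, "wm0", EVCNT_TYPE_INTR)
 * formats the name "txq00txdw" into txq_txdw_evcnt_name and attaches
 * the txq_ev_txdw counter under it, so it shows up in "vmstat -e".
 */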

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a Tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue intermediates them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* an Rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and MSI use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and MSI use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx unsupported flow control frames */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
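
/*
 * Illustrative sketch, not driver code: the tail pointer lets
 * WM_RXCHAIN_LINK() append an mbuf in O(1) without walking the chain.
 * With a hypothetical helper get_next_cluster() handing out buffers,
 * assembling a multi-buffer (e.g. jumbo) frame would look like:
 *
 *	WM_RXCHAIN_RESET(rxq);
 *	while ((m = get_next_cluster()) != NULL)
 *		WM_RXCHAIN_LINK(rxq, m);
 *
 * after which rxq->rxq_head points at the complete chain.
 */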

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
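
/*
 * Illustrative only: PCI writes are posted, so a write that must hit
 * the device before a timed delay is followed by a flushing read of
 * the STATUS register, e.g.:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */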

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
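
/*
 * Illustrative only (register names assumed for the example): with a
 * 64-bit bus_addr_t, a descriptor ring base is programmed as two
 * 32-bit halves:
 *
 *	CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(txq, 0));
 */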

/*
 * Register read/write functions, other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start(struct ifnet *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
   1339 	  WM_T_I350,		WMP_F_COPPER },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1342 	  "I354 Gigabit Ethernet (KX)",
   1343 	  WM_T_I354,		WMP_F_SERDES },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1346 	  "I354 Gigabit Ethernet (SGMII)",
   1347 	  WM_T_I354,		WMP_F_COPPER },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1350 	  "I354 Gigabit Ethernet (2.5G)",
   1351 	  WM_T_I354,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1354 	  "I210-T1 Ethernet Server Adapter",
   1355 	  WM_T_I210,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1358 	  "I210 Ethernet (Copper OEM)",
   1359 	  WM_T_I210,		WMP_F_COPPER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1362 	  "I210 Ethernet (Copper IT)",
   1363 	  WM_T_I210,		WMP_F_COPPER },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1366 	  "I210 Ethernet (FLASH less)",
   1367 	  WM_T_I210,		WMP_F_COPPER },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1370 	  "I210 Gigabit Ethernet (Fiber)",
   1371 	  WM_T_I210,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1374 	  "I210 Gigabit Ethernet (SERDES)",
   1375 	  WM_T_I210,		WMP_F_SERDES },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1378 	  "I210 Gigabit Ethernet (FLASH less)",
   1379 	  WM_T_I210,		WMP_F_SERDES },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1382 	  "I210 Gigabit Ethernet (SGMII)",
   1383 	  WM_T_I210,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1386 	  "I211 Ethernet (COPPER)",
   1387 	  WM_T_I211,		WMP_F_COPPER },
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1389 	  "I217 V Ethernet Connection",
   1390 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1392 	  "I217 LM Ethernet Connection",
   1393 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1395 	  "I218 V Ethernet Connection",
   1396 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1398 	  "I218 V Ethernet Connection",
   1399 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1401 	  "I218 V Ethernet Connection",
   1402 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1404 	  "I218 LM Ethernet Connection",
   1405 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1407 	  "I218 LM Ethernet Connection",
   1408 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1410 	  "I218 LM Ethernet Connection",
   1411 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1412 #if 0
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1414 	  "I219 V Ethernet Connection",
   1415 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1417 	  "I219 V Ethernet Connection",
   1418 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1420 	  "I219 V Ethernet Connection",
   1421 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1423 	  "I219 V Ethernet Connection",
   1424 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1426 	  "I219 LM Ethernet Connection",
   1427 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1429 	  "I219 LM Ethernet Connection",
   1430 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1432 	  "I219 LM Ethernet Connection",
   1433 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1435 	  "I219 LM Ethernet Connection",
   1436 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1438 	  "I219 LM Ethernet Connection",
   1439 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1440 #endif
   1441 	{ 0,			0,
   1442 	  NULL,
   1443 	  0,			0 },
   1444 };
   1445 
   1446 /*
   1447  * Register read/write functions.
   1448  * Other than CSR_{READ|WRITE}().
   1449  */
   1450 
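         /*
          * Several chips provide indirect I/O-mapped register access:
          * the register offset is first written at I/O BAR offset 0,
          * and the register contents are then read or written at
          * offset 4, which is exactly what the helpers below do.
          */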
   1451 #if 0 /* Not currently used */
   1452 static inline uint32_t
   1453 wm_io_read(struct wm_softc *sc, int reg)
   1454 {
   1455 
   1456 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1457 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1458 }
   1459 #endif
   1460 
   1461 static inline void
   1462 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1463 {
   1464 
   1465 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1466 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1467 }
   1468 
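         /*
          * Write an 8-bit value to a controller register that sits
          * behind an indirect, SCTL-style interface: the data and the
          * target offset are packed into a single register write, and
          * the hardware is then polled in 5us steps, for at most
          * SCTL_CTL_POLL_TIMEOUT iterations, until it reports ready.
          */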
   1469 static inline void
   1470 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1471     uint32_t data)
   1472 {
   1473 	uint32_t regval;
   1474 	int i;
   1475 
   1476 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1477 
   1478 	CSR_WRITE(sc, reg, regval);
   1479 
   1480 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1481 		delay(5);
   1482 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1483 			break;
   1484 	}
   1485 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1486 		aprint_error("%s: WARNING:"
   1487 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1488 		    device_xname(sc->sc_dev), reg);
   1489 	}
   1490 }
   1491 
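         /*
          * Store a DMA address into a little-endian descriptor address
          * field, split into 32-bit halves.  Illustrative example for a
          * 64-bit bus_addr_t: v = 0x0000001234567890 yields
          * wa_low = htole32(0x34567890) and wa_high = htole32(0x00000012);
          * with a 32-bit bus_addr_t, wa_high is simply forced to zero.
          */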
   1492 static inline void
   1493 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1494 {
   1495 	wa->wa_low = htole32(v & 0xffffffffU);
   1496 	if (sizeof(bus_addr_t) == 8)
   1497 		wa->wa_high = htole32((uint64_t) v >> 32);
   1498 	else
   1499 		wa->wa_high = 0;
   1500 }
   1501 
   1502 /*
   1503  * Descriptor sync/init functions.
   1504  */
   1505 static inline void
   1506 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1507 {
   1508 	struct wm_softc *sc = txq->txq_sc;
   1509 
   1510 	/* If it will wrap around, sync to the end of the ring. */
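         	/*
         	 * Illustrative example: with WM_NTXDESC(txq) == 256,
         	 * start == 250 and num == 10, this first sync covers
         	 * descriptors 250..255, after which num becomes 4 and
         	 * start 0, so the sync below covers descriptors 0..3.
         	 */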
   1511 	if ((start + num) > WM_NTXDESC(txq)) {
   1512 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1513 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1514 		    (WM_NTXDESC(txq) - start), ops);
   1515 		num -= (WM_NTXDESC(txq) - start);
   1516 		start = 0;
   1517 	}
   1518 
   1519 	/* Now sync whatever is left. */
   1520 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1521 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1522 }
   1523 
   1524 static inline void
   1525 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1526 {
   1527 	struct wm_softc *sc = rxq->rxq_sc;
   1528 
   1529 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1530 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1531 }
   1532 
   1533 static inline void
   1534 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1535 {
   1536 	struct wm_softc *sc = rxq->rxq_sc;
   1537 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1538 	struct mbuf *m = rxs->rxs_mbuf;
   1539 
   1540 	/*
   1541 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1542 	 * so that the payload after the Ethernet header is aligned
   1543 	 * to a 4-byte boundary.
    1544 	 *
   1545 	 * XXX BRAINDAMAGE ALERT!
   1546 	 * The stupid chip uses the same size for every buffer, which
   1547 	 * is set in the Receive Control register.  We are using the 2K
   1548 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1549 	 * reason, we can't "scoot" packets longer than the standard
   1550 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1551 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1552 	 * the upper layer copy the headers.
   1553 	 */
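         	/*
         	 * Illustrative example: with the 2K buffer size and
         	 * sc_align_tweak == 2, a standard 1514-byte frame fits in
         	 * the remaining 2046 bytes, while anything larger forces
         	 * align_tweak to 0 on strict-alignment platforms, as
         	 * described above.
         	 */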
   1554 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1555 
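         	/*
         	 * Three receive descriptor formats are initialized below:
         	 * the 82574 extended descriptor, the "new queue" descriptor
         	 * used by 82575 and later, and the legacy wiseman
         	 * descriptor used by everything else.
         	 */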
   1556 	if (sc->sc_type == WM_T_82574) {
   1557 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1558 		rxd->erx_data.erxd_addr =
   1559 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1560 		rxd->erx_data.erxd_dd = 0;
   1561 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1562 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1563 
   1564 		rxd->nqrx_data.nrxd_paddr =
   1565 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1566 		/* Currently, split header is not supported. */
   1567 		rxd->nqrx_data.nrxd_haddr = 0;
   1568 	} else {
   1569 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1570 
   1571 		wm_set_dma_addr(&rxd->wrx_addr,
   1572 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1573 		rxd->wrx_len = 0;
   1574 		rxd->wrx_cksum = 0;
   1575 		rxd->wrx_status = 0;
   1576 		rxd->wrx_errors = 0;
   1577 		rxd->wrx_special = 0;
   1578 	}
   1579 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1580 
   1581 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1582 }
   1583 
   1584 /*
   1585  * Device driver interface functions and commonly used functions.
   1586  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1587  */
   1588 
   1589 /* Lookup supported device table */
   1590 static const struct wm_product *
   1591 wm_lookup(const struct pci_attach_args *pa)
   1592 {
   1593 	const struct wm_product *wmp;
   1594 
   1595 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1596 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1597 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1598 			return wmp;
   1599 	}
   1600 	return NULL;
   1601 }
   1602 
   1603 /* The match function (ca_match) */
   1604 static int
   1605 wm_match(device_t parent, cfdata_t cf, void *aux)
   1606 {
   1607 	struct pci_attach_args *pa = aux;
   1608 
   1609 	if (wm_lookup(pa) != NULL)
   1610 		return 1;
   1611 
   1612 	return 0;
   1613 }
   1614 
   1615 /* The attach function (ca_attach) */
   1616 static void
   1617 wm_attach(device_t parent, device_t self, void *aux)
   1618 {
   1619 	struct wm_softc *sc = device_private(self);
   1620 	struct pci_attach_args *pa = aux;
   1621 	prop_dictionary_t dict;
   1622 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1623 	pci_chipset_tag_t pc = pa->pa_pc;
   1624 	int counts[PCI_INTR_TYPE_SIZE];
   1625 	pci_intr_type_t max_type;
   1626 	const char *eetype, *xname;
   1627 	bus_space_tag_t memt;
   1628 	bus_space_handle_t memh;
   1629 	bus_size_t memsize;
   1630 	int memh_valid;
   1631 	int i, error;
   1632 	const struct wm_product *wmp;
   1633 	prop_data_t ea;
   1634 	prop_number_t pn;
   1635 	uint8_t enaddr[ETHER_ADDR_LEN];
   1636 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1637 	pcireg_t preg, memtype;
   1638 	uint16_t eeprom_data, apme_mask;
   1639 	bool force_clear_smbi;
   1640 	uint32_t link_mode;
   1641 	uint32_t reg;
   1642 	void (*deferred_start_func)(struct ifnet *) = NULL;
   1643 
   1644 	sc->sc_dev = self;
   1645 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1646 	sc->sc_core_stopping = false;
   1647 
   1648 	wmp = wm_lookup(pa);
   1649 #ifdef DIAGNOSTIC
   1650 	if (wmp == NULL) {
   1651 		printf("\n");
   1652 		panic("wm_attach: impossible");
   1653 	}
   1654 #endif
   1655 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1656 
   1657 	sc->sc_pc = pa->pa_pc;
   1658 	sc->sc_pcitag = pa->pa_tag;
   1659 
   1660 	if (pci_dma64_available(pa))
   1661 		sc->sc_dmat = pa->pa_dmat64;
   1662 	else
   1663 		sc->sc_dmat = pa->pa_dmat;
   1664 
   1665 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1666 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1667 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1668 
   1669 	sc->sc_type = wmp->wmp_type;
   1670 
   1671 	/* Set default function pointers */
   1672 	sc->phy.acquire = wm_get_null;
   1673 	sc->phy.release = wm_put_null;
   1674 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1675 
   1676 	if (sc->sc_type < WM_T_82543) {
   1677 		if (sc->sc_rev < 2) {
   1678 			aprint_error_dev(sc->sc_dev,
   1679 			    "i82542 must be at least rev. 2\n");
   1680 			return;
   1681 		}
   1682 		if (sc->sc_rev < 3)
   1683 			sc->sc_type = WM_T_82542_2_0;
   1684 	}
   1685 
   1686 	/*
   1687 	 * Disable MSI for Errata:
   1688 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1689 	 *
   1690 	 *  82544: Errata 25
   1691 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1692 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1693 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1694 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1695 	 *
   1696 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1697 	 *
   1698 	 *  82571 & 82572: Errata 63
   1699 	 */
   1700 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1701 	    || (sc->sc_type == WM_T_82572))
   1702 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1703 
   1704 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1705 	    || (sc->sc_type == WM_T_82580)
   1706 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1707 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1708 		sc->sc_flags |= WM_F_NEWQUEUE;
   1709 
   1710 	/* Set device properties (mactype) */
   1711 	dict = device_properties(sc->sc_dev);
   1712 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1713 
   1714 	/*
    1715 	 * Map the device.  All devices support memory-mapped access,
   1716 	 * and it is really required for normal operation.
   1717 	 */
   1718 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1719 	switch (memtype) {
   1720 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1721 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1722 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1723 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1724 		break;
   1725 	default:
   1726 		memh_valid = 0;
   1727 		break;
   1728 	}
   1729 
   1730 	if (memh_valid) {
   1731 		sc->sc_st = memt;
   1732 		sc->sc_sh = memh;
   1733 		sc->sc_ss = memsize;
   1734 	} else {
   1735 		aprint_error_dev(sc->sc_dev,
   1736 		    "unable to map device registers\n");
   1737 		return;
   1738 	}
   1739 
   1740 	/*
   1741 	 * In addition, i82544 and later support I/O mapped indirect
   1742 	 * register access.  It is not desirable (nor supported in
   1743 	 * this driver) to use it for normal operation, though it is
   1744 	 * required to work around bugs in some chip versions.
   1745 	 */
   1746 	if (sc->sc_type >= WM_T_82544) {
   1747 		/* First we have to find the I/O BAR. */
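         		/*
         		 * Walk the BARs: a 64-bit memory BAR occupies two
         		 * consecutive config registers, so its upper dword
         		 * must be skipped.
         		 */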
   1748 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1749 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1750 			if (memtype == PCI_MAPREG_TYPE_IO)
   1751 				break;
   1752 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1753 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1754 				i += 4;	/* skip high bits, too */
   1755 		}
   1756 		if (i < PCI_MAPREG_END) {
   1757 			/*
    1758 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1759 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1760 			 * That's not a problem, because newer chips don't
    1761 			 * have this bug.
    1762 			 *
    1763 			 * The i8254x apparently doesn't respond when the
    1764 			 * I/O BAR is 0, which looks as if it hasn't been
    1765 			 * configured.
   1766 			 */
   1767 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1768 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1769 				aprint_error_dev(sc->sc_dev,
   1770 				    "WARNING: I/O BAR at zero.\n");
   1771 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1772 					0, &sc->sc_iot, &sc->sc_ioh,
   1773 					NULL, &sc->sc_ios) == 0) {
   1774 				sc->sc_flags |= WM_F_IOH_VALID;
   1775 			} else {
   1776 				aprint_error_dev(sc->sc_dev,
   1777 				    "WARNING: unable to map I/O space\n");
   1778 			}
   1779 		}
   1780 
   1781 	}
   1782 
   1783 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1784 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1785 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1786 	if (sc->sc_type < WM_T_82542_2_1)
   1787 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1788 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1789 
   1790 	/* power up chip */
   1791 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1792 	    NULL)) && error != EOPNOTSUPP) {
   1793 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1794 		return;
   1795 	}
   1796 
   1797 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1798 
   1799 	/* Allocation settings */
   1800 	max_type = PCI_INTR_TYPE_MSIX;
   1801 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1802 	counts[PCI_INTR_TYPE_MSI] = 1;
   1803 	counts[PCI_INTR_TYPE_INTX] = 1;
   1804 
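         	/*
         	 * Interrupt allocation is tried in stages: MSI-X first,
         	 * then (if MSI-X setup fails and its vectors are released)
         	 * MSI, and finally INTx.
         	 */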
   1805 alloc_retry:
   1806 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1807 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1808 		return;
   1809 	}
   1810 
   1811 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1812 		error = wm_setup_msix(sc);
   1813 		if (error) {
   1814 			pci_intr_release(pc, sc->sc_intrs,
   1815 			    counts[PCI_INTR_TYPE_MSIX]);
   1816 
   1817 			/* Setup for MSI: Disable MSI-X */
   1818 			max_type = PCI_INTR_TYPE_MSI;
   1819 			counts[PCI_INTR_TYPE_MSI] = 1;
   1820 			counts[PCI_INTR_TYPE_INTX] = 1;
   1821 			goto alloc_retry;
   1822 		}
    1823 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1824 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1825 		error = wm_setup_legacy(sc);
   1826 		if (error) {
   1827 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1828 			    counts[PCI_INTR_TYPE_MSI]);
   1829 
   1830 			/* The next try is for INTx: Disable MSI */
   1831 			max_type = PCI_INTR_TYPE_INTX;
   1832 			counts[PCI_INTR_TYPE_INTX] = 1;
   1833 			goto alloc_retry;
   1834 		}
   1835 	} else {
   1836 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1837 		error = wm_setup_legacy(sc);
   1838 		if (error) {
   1839 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1840 			    counts[PCI_INTR_TYPE_INTX]);
   1841 			return;
   1842 		}
   1843 	}
   1844 
   1845 	/*
   1846 	 * Check the function ID (unit number of the chip).
   1847 	 */
   1848 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1849 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1850 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1851 	    || (sc->sc_type == WM_T_82580)
   1852 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1853 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1854 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1855 	else
   1856 		sc->sc_funcid = 0;
   1857 
   1858 	/*
   1859 	 * Determine a few things about the bus we're connected to.
   1860 	 */
   1861 	if (sc->sc_type < WM_T_82543) {
   1862 		/* We don't really know the bus characteristics here. */
   1863 		sc->sc_bus_speed = 33;
   1864 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1865 		/*
    1866 		 * CSA (Communication Streaming Architecture) is about as
    1867 		 * fast as a 32-bit 66MHz PCI bus.
   1868 		 */
   1869 		sc->sc_flags |= WM_F_CSA;
   1870 		sc->sc_bus_speed = 66;
   1871 		aprint_verbose_dev(sc->sc_dev,
   1872 		    "Communication Streaming Architecture\n");
   1873 		if (sc->sc_type == WM_T_82547) {
   1874 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1875 			callout_setfunc(&sc->sc_txfifo_ch,
   1876 					wm_82547_txfifo_stall, sc);
   1877 			aprint_verbose_dev(sc->sc_dev,
   1878 			    "using 82547 Tx FIFO stall work-around\n");
   1879 		}
   1880 	} else if (sc->sc_type >= WM_T_82571) {
   1881 		sc->sc_flags |= WM_F_PCIE;
   1882 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1883 		    && (sc->sc_type != WM_T_ICH10)
   1884 		    && (sc->sc_type != WM_T_PCH)
   1885 		    && (sc->sc_type != WM_T_PCH2)
   1886 		    && (sc->sc_type != WM_T_PCH_LPT)
   1887 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1888 			/* ICH* and PCH* have no PCIe capability registers */
   1889 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1890 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1891 				NULL) == 0)
   1892 				aprint_error_dev(sc->sc_dev,
   1893 				    "unable to find PCIe capability\n");
   1894 		}
   1895 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1896 	} else {
   1897 		reg = CSR_READ(sc, WMREG_STATUS);
   1898 		if (reg & STATUS_BUS64)
   1899 			sc->sc_flags |= WM_F_BUS64;
   1900 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1901 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1902 
   1903 			sc->sc_flags |= WM_F_PCIX;
   1904 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1905 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1906 				aprint_error_dev(sc->sc_dev,
   1907 				    "unable to find PCIX capability\n");
   1908 			else if (sc->sc_type != WM_T_82545_3 &&
   1909 				 sc->sc_type != WM_T_82546_3) {
   1910 				/*
   1911 				 * Work around a problem caused by the BIOS
   1912 				 * setting the max memory read byte count
   1913 				 * incorrectly.
   1914 				 */
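         				/*
         				 * Both fields encode 512 << n bytes;
         				 * e.g. (illustrative) bytecnt == 3
         				 * (4096 bytes) with maxb == 2 (2048
         				 * bytes) is clamped down to 2048
         				 * below.
         				 */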
   1915 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1916 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1917 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1918 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1919 
   1920 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1921 				    PCIX_CMD_BYTECNT_SHIFT;
   1922 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1923 				    PCIX_STATUS_MAXB_SHIFT;
   1924 				if (bytecnt > maxb) {
   1925 					aprint_verbose_dev(sc->sc_dev,
   1926 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1927 					    512 << bytecnt, 512 << maxb);
   1928 					pcix_cmd = (pcix_cmd &
   1929 					    ~PCIX_CMD_BYTECNT_MASK) |
   1930 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1931 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1932 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1933 					    pcix_cmd);
   1934 				}
   1935 			}
   1936 		}
   1937 		/*
   1938 		 * The quad port adapter is special; it has a PCIX-PCIX
   1939 		 * bridge on the board, and can run the secondary bus at
   1940 		 * a higher speed.
   1941 		 */
   1942 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1943 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1944 								      : 66;
   1945 		} else if (sc->sc_flags & WM_F_PCIX) {
   1946 			switch (reg & STATUS_PCIXSPD_MASK) {
   1947 			case STATUS_PCIXSPD_50_66:
   1948 				sc->sc_bus_speed = 66;
   1949 				break;
   1950 			case STATUS_PCIXSPD_66_100:
   1951 				sc->sc_bus_speed = 100;
   1952 				break;
   1953 			case STATUS_PCIXSPD_100_133:
   1954 				sc->sc_bus_speed = 133;
   1955 				break;
   1956 			default:
   1957 				aprint_error_dev(sc->sc_dev,
   1958 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1959 				    reg & STATUS_PCIXSPD_MASK);
   1960 				sc->sc_bus_speed = 66;
   1961 				break;
   1962 			}
   1963 		} else
   1964 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1965 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1966 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1967 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1968 	}
   1969 
   1970 	/* clear interesting stat counters */
   1971 	CSR_READ(sc, WMREG_COLC);
   1972 	CSR_READ(sc, WMREG_RXERRC);
   1973 
   1974 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1975 	    || (sc->sc_type >= WM_T_ICH8))
   1976 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1977 	if (sc->sc_type >= WM_T_ICH8)
   1978 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1979 
   1980 	/* Set PHY, NVM mutex related stuff */
   1981 	switch (sc->sc_type) {
   1982 	case WM_T_82542_2_0:
   1983 	case WM_T_82542_2_1:
   1984 	case WM_T_82543:
   1985 	case WM_T_82544:
   1986 		/* Microwire */
   1987 		sc->sc_nvm_wordsize = 64;
   1988 		sc->sc_nvm_addrbits = 6;
   1989 		break;
   1990 	case WM_T_82540:
   1991 	case WM_T_82545:
   1992 	case WM_T_82545_3:
   1993 	case WM_T_82546:
   1994 	case WM_T_82546_3:
   1995 		/* Microwire */
   1996 		reg = CSR_READ(sc, WMREG_EECD);
   1997 		if (reg & EECD_EE_SIZE) {
   1998 			sc->sc_nvm_wordsize = 256;
   1999 			sc->sc_nvm_addrbits = 8;
   2000 		} else {
   2001 			sc->sc_nvm_wordsize = 64;
   2002 			sc->sc_nvm_addrbits = 6;
   2003 		}
   2004 		sc->sc_flags |= WM_F_LOCK_EECD;
   2005 		break;
   2006 	case WM_T_82541:
   2007 	case WM_T_82541_2:
   2008 	case WM_T_82547:
   2009 	case WM_T_82547_2:
   2010 		sc->sc_flags |= WM_F_LOCK_EECD;
   2011 		reg = CSR_READ(sc, WMREG_EECD);
   2012 		if (reg & EECD_EE_TYPE) {
   2013 			/* SPI */
   2014 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2015 			wm_nvm_set_addrbits_size_eecd(sc);
   2016 		} else {
   2017 			/* Microwire */
   2018 			if ((reg & EECD_EE_ABITS) != 0) {
   2019 				sc->sc_nvm_wordsize = 256;
   2020 				sc->sc_nvm_addrbits = 8;
   2021 			} else {
   2022 				sc->sc_nvm_wordsize = 64;
   2023 				sc->sc_nvm_addrbits = 6;
   2024 			}
   2025 		}
   2026 		break;
   2027 	case WM_T_82571:
   2028 	case WM_T_82572:
   2029 		/* SPI */
   2030 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2031 		wm_nvm_set_addrbits_size_eecd(sc);
   2032 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2033 		sc->phy.acquire = wm_get_swsm_semaphore;
   2034 		sc->phy.release = wm_put_swsm_semaphore;
   2035 		break;
   2036 	case WM_T_82573:
   2037 	case WM_T_82574:
   2038 	case WM_T_82583:
   2039 		if (sc->sc_type == WM_T_82573) {
   2040 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2041 			sc->phy.acquire = wm_get_swsm_semaphore;
   2042 			sc->phy.release = wm_put_swsm_semaphore;
   2043 		} else {
   2044 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2045 			/* Both PHY and NVM use the same semaphore. */
    2046 			sc->phy.acquire = wm_get_swfwhw_semaphore;
    2047 			sc->phy.release = wm_put_swfwhw_semaphore;
   2050 		}
   2051 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2052 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2053 			sc->sc_nvm_wordsize = 2048;
   2054 		} else {
   2055 			/* SPI */
   2056 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2057 			wm_nvm_set_addrbits_size_eecd(sc);
   2058 		}
   2059 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2060 		break;
   2061 	case WM_T_82575:
   2062 	case WM_T_82576:
   2063 	case WM_T_82580:
   2064 	case WM_T_I350:
   2065 	case WM_T_I354:
   2066 	case WM_T_80003:
   2067 		/* SPI */
   2068 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2069 		wm_nvm_set_addrbits_size_eecd(sc);
   2070 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2071 		    | WM_F_LOCK_SWSM;
   2072 		sc->phy.acquire = wm_get_phy_82575;
   2073 		sc->phy.release = wm_put_phy_82575;
   2074 		break;
   2075 	case WM_T_ICH8:
   2076 	case WM_T_ICH9:
   2077 	case WM_T_ICH10:
   2078 	case WM_T_PCH:
   2079 	case WM_T_PCH2:
   2080 	case WM_T_PCH_LPT:
   2081 		/* FLASH */
   2082 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2083 		sc->sc_nvm_wordsize = 2048;
   2084 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2085 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2086 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2087 			aprint_error_dev(sc->sc_dev,
   2088 			    "can't map FLASH registers\n");
   2089 			goto out;
   2090 		}
   2091 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2092 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2093 		    ICH_FLASH_SECTOR_SIZE;
   2094 		sc->sc_ich8_flash_bank_size =
   2095 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2096 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2097 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2098 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
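         		/*
         		 * Illustrative example, assuming 4KB flash sectors:
         		 * a GFPREG with base 0 and limit 1 spans two sectors
         		 * (8192 bytes); split across two banks of 2-byte
         		 * words, that is 2048 words per bank, matching the
         		 * sc_nvm_wordsize set above.
         		 */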
   2099 		sc->sc_flashreg_offset = 0;
   2100 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2101 		sc->phy.release = wm_put_swflag_ich8lan;
   2102 		break;
   2103 	case WM_T_PCH_SPT:
   2104 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2105 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2106 		sc->sc_flasht = sc->sc_st;
   2107 		sc->sc_flashh = sc->sc_sh;
   2108 		sc->sc_ich8_flash_base = 0;
   2109 		sc->sc_nvm_wordsize =
   2110 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2111 			* NVM_SIZE_MULTIPLIER;
    2112 		/* It is the size in bytes; we want words. */
   2113 		sc->sc_nvm_wordsize /= 2;
   2114 		/* assume 2 banks */
   2115 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
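         		/*
         		 * Illustrative example: a strap field value of 7
         		 * gives (7 + 1) * NVM_SIZE_MULTIPLIER bytes (32KB if
         		 * the multiplier is 4KB), i.e. 16K words in total and
         		 * 8K words for each of the two assumed banks.
         		 */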
   2116 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2117 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2118 		sc->phy.release = wm_put_swflag_ich8lan;
   2119 		break;
   2120 	case WM_T_I210:
   2121 	case WM_T_I211:
   2122 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2123 			wm_nvm_set_addrbits_size_eecd(sc);
   2124 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2125 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2126 		} else {
   2127 			sc->sc_nvm_wordsize = INVM_SIZE;
   2128 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2129 		}
   2130 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2131 		sc->phy.acquire = wm_get_phy_82575;
   2132 		sc->phy.release = wm_put_phy_82575;
   2133 		break;
   2134 	default:
   2135 		break;
   2136 	}
   2137 
   2138 	/* Reset the chip to a known state. */
   2139 	wm_reset(sc);
   2140 
   2141 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2142 	switch (sc->sc_type) {
   2143 	case WM_T_82571:
   2144 	case WM_T_82572:
   2145 		reg = CSR_READ(sc, WMREG_SWSM2);
   2146 		if ((reg & SWSM2_LOCK) == 0) {
   2147 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2148 			force_clear_smbi = true;
   2149 		} else
   2150 			force_clear_smbi = false;
   2151 		break;
   2152 	case WM_T_82573:
   2153 	case WM_T_82574:
   2154 	case WM_T_82583:
   2155 		force_clear_smbi = true;
   2156 		break;
   2157 	default:
   2158 		force_clear_smbi = false;
   2159 		break;
   2160 	}
   2161 	if (force_clear_smbi) {
   2162 		reg = CSR_READ(sc, WMREG_SWSM);
   2163 		if ((reg & SWSM_SMBI) != 0)
   2164 			aprint_error_dev(sc->sc_dev,
   2165 			    "Please update the Bootagent\n");
   2166 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2167 	}
   2168 
   2169 	/*
    2170 	 * Defer printing the EEPROM type until after verifying the checksum.
   2171 	 * This allows the EEPROM type to be printed correctly in the case
   2172 	 * that no EEPROM is attached.
   2173 	 */
   2174 	/*
   2175 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2176 	 * this for later, so we can fail future reads from the EEPROM.
   2177 	 */
   2178 	if (wm_nvm_validate_checksum(sc)) {
   2179 		/*
    2180 		 * Validate again, because some PCI-e parts fail the
    2181 		 * first check due to the link being in a sleep state.
   2182 		 */
   2183 		if (wm_nvm_validate_checksum(sc))
   2184 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2185 	}
   2186 
   2187 	/* Set device properties (macflags) */
   2188 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2189 
   2190 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2191 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2192 	else {
   2193 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2194 		    sc->sc_nvm_wordsize);
   2195 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2196 			aprint_verbose("iNVM");
   2197 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2198 			aprint_verbose("FLASH(HW)");
   2199 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2200 			aprint_verbose("FLASH");
   2201 		else {
   2202 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2203 				eetype = "SPI";
   2204 			else
   2205 				eetype = "MicroWire";
   2206 			aprint_verbose("(%d address bits) %s EEPROM",
   2207 			    sc->sc_nvm_addrbits, eetype);
   2208 		}
   2209 	}
   2210 	wm_nvm_version(sc);
   2211 	aprint_verbose("\n");
   2212 
   2213 	/* Check for I21[01] PLL workaround */
   2214 	if (sc->sc_type == WM_T_I210)
   2215 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2216 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2217 		/* NVM image release 3.25 has a workaround */
   2218 		if ((sc->sc_nvm_ver_major < 3)
   2219 		    || ((sc->sc_nvm_ver_major == 3)
   2220 			&& (sc->sc_nvm_ver_minor < 25))) {
   2221 			aprint_verbose_dev(sc->sc_dev,
   2222 			    "ROM image version %d.%d is older than 3.25\n",
   2223 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2224 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2225 		}
   2226 	}
   2227 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2228 		wm_pll_workaround_i210(sc);
   2229 
   2230 	wm_get_wakeup(sc);
   2231 
   2232 	/* Non-AMT based hardware can now take control from firmware */
   2233 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2234 		wm_get_hw_control(sc);
   2235 
   2236 	/*
    2237 	 * Read the Ethernet address from the EEPROM, unless it is found
    2238 	 * first in the device properties.
   2239 	 */
   2240 	ea = prop_dictionary_get(dict, "mac-address");
   2241 	if (ea != NULL) {
   2242 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2243 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2244 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2245 	} else {
   2246 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2247 			aprint_error_dev(sc->sc_dev,
   2248 			    "unable to read Ethernet address\n");
   2249 			goto out;
   2250 		}
   2251 	}
   2252 
   2253 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2254 	    ether_sprintf(enaddr));
   2255 
   2256 	/*
   2257 	 * Read the config info from the EEPROM, and set up various
   2258 	 * bits in the control registers based on their contents.
   2259 	 */
   2260 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2261 	if (pn != NULL) {
   2262 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2263 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2264 	} else {
   2265 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2266 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2267 			goto out;
   2268 		}
   2269 	}
   2270 
   2271 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2272 	if (pn != NULL) {
   2273 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2274 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2275 	} else {
   2276 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2277 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2278 			goto out;
   2279 		}
   2280 	}
   2281 
   2282 	/* check for WM_F_WOL */
   2283 	switch (sc->sc_type) {
   2284 	case WM_T_82542_2_0:
   2285 	case WM_T_82542_2_1:
   2286 	case WM_T_82543:
   2287 		/* dummy? */
   2288 		eeprom_data = 0;
   2289 		apme_mask = NVM_CFG3_APME;
   2290 		break;
   2291 	case WM_T_82544:
   2292 		apme_mask = NVM_CFG2_82544_APM_EN;
   2293 		eeprom_data = cfg2;
   2294 		break;
   2295 	case WM_T_82546:
   2296 	case WM_T_82546_3:
   2297 	case WM_T_82571:
   2298 	case WM_T_82572:
   2299 	case WM_T_82573:
   2300 	case WM_T_82574:
   2301 	case WM_T_82583:
   2302 	case WM_T_80003:
   2303 	default:
   2304 		apme_mask = NVM_CFG3_APME;
   2305 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2306 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2307 		break;
   2308 	case WM_T_82575:
   2309 	case WM_T_82576:
   2310 	case WM_T_82580:
   2311 	case WM_T_I350:
   2312 	case WM_T_I354: /* XXX ok? */
   2313 	case WM_T_ICH8:
   2314 	case WM_T_ICH9:
   2315 	case WM_T_ICH10:
   2316 	case WM_T_PCH:
   2317 	case WM_T_PCH2:
   2318 	case WM_T_PCH_LPT:
   2319 	case WM_T_PCH_SPT:
   2320 		/* XXX The funcid should be checked on some devices */
   2321 		apme_mask = WUC_APME;
   2322 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2323 		break;
   2324 	}
   2325 
   2326 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2327 	if ((eeprom_data & apme_mask) != 0)
   2328 		sc->sc_flags |= WM_F_WOL;
   2329 #ifdef WM_DEBUG
   2330 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2331 		printf("WOL\n");
   2332 #endif
   2333 
   2334 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2335 		/* Check NVM for autonegotiation */
   2336 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2337 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2338 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2339 		}
   2340 	}
   2341 
   2342 	/*
    2343 	 * XXX need special handling for some multi-port cards
    2344 	 * to disable a particular port.
   2345 	 */
   2346 
   2347 	if (sc->sc_type >= WM_T_82544) {
   2348 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2349 		if (pn != NULL) {
   2350 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2351 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2352 		} else {
   2353 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2354 				aprint_error_dev(sc->sc_dev,
   2355 				    "unable to read SWDPIN\n");
   2356 				goto out;
   2357 			}
   2358 		}
   2359 	}
   2360 
   2361 	if (cfg1 & NVM_CFG1_ILOS)
   2362 		sc->sc_ctrl |= CTRL_ILOS;
   2363 
   2364 	/*
   2365 	 * XXX
    2366 	 * This code isn't correct because pins 2 and 3 are located
    2367 	 * at different positions on newer chips. Check all datasheets.
    2368 	 *
    2369 	 * Until this problem is resolved, apply it only to chips < 82580.
   2370 	 */
   2371 	if (sc->sc_type <= WM_T_82580) {
   2372 		if (sc->sc_type >= WM_T_82544) {
   2373 			sc->sc_ctrl |=
   2374 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2375 			    CTRL_SWDPIO_SHIFT;
   2376 			sc->sc_ctrl |=
   2377 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2378 			    CTRL_SWDPINS_SHIFT;
   2379 		} else {
   2380 			sc->sc_ctrl |=
   2381 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2382 			    CTRL_SWDPIO_SHIFT;
   2383 		}
   2384 	}
   2385 
   2386 	/* XXX For other than 82580? */
   2387 	if (sc->sc_type == WM_T_82580) {
   2388 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2389 		if (nvmword & __BIT(13))
   2390 			sc->sc_ctrl |= CTRL_ILOS;
   2391 	}
   2392 
   2393 #if 0
   2394 	if (sc->sc_type >= WM_T_82544) {
   2395 		if (cfg1 & NVM_CFG1_IPS0)
   2396 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2397 		if (cfg1 & NVM_CFG1_IPS1)
   2398 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2399 		sc->sc_ctrl_ext |=
   2400 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2401 		    CTRL_EXT_SWDPIO_SHIFT;
   2402 		sc->sc_ctrl_ext |=
   2403 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2404 		    CTRL_EXT_SWDPINS_SHIFT;
   2405 	} else {
   2406 		sc->sc_ctrl_ext |=
   2407 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2408 		    CTRL_EXT_SWDPIO_SHIFT;
   2409 	}
   2410 #endif
   2411 
   2412 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2413 #if 0
   2414 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2415 #endif
   2416 
   2417 	if (sc->sc_type == WM_T_PCH) {
   2418 		uint16_t val;
   2419 
   2420 		/* Save the NVM K1 bit setting */
   2421 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2422 
   2423 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2424 			sc->sc_nvm_k1_enabled = 1;
   2425 		else
   2426 			sc->sc_nvm_k1_enabled = 0;
   2427 	}
   2428 
   2429 	/*
    2430 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2431 	 * media structures accordingly.
   2432 	 */
   2433 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2434 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2435 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2436 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2437 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2438 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2439 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2440 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2441 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2442 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2443 	    || (sc->sc_type == WM_T_I211)) {
   2444 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2445 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2446 		switch (link_mode) {
   2447 		case CTRL_EXT_LINK_MODE_1000KX:
   2448 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2449 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2450 			break;
   2451 		case CTRL_EXT_LINK_MODE_SGMII:
   2452 			if (wm_sgmii_uses_mdio(sc)) {
   2453 				aprint_verbose_dev(sc->sc_dev,
   2454 				    "SGMII(MDIO)\n");
   2455 				sc->sc_flags |= WM_F_SGMII;
   2456 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2457 				break;
   2458 			}
   2459 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2460 			/*FALLTHROUGH*/
   2461 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2462 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2463 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2464 				if (link_mode
   2465 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2466 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2467 					sc->sc_flags |= WM_F_SGMII;
   2468 				} else {
   2469 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2470 					aprint_verbose_dev(sc->sc_dev,
   2471 					    "SERDES\n");
   2472 				}
   2473 				break;
   2474 			}
   2475 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2476 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2477 
   2478 			/* Change current link mode setting */
   2479 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2480 			switch (sc->sc_mediatype) {
   2481 			case WM_MEDIATYPE_COPPER:
   2482 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2483 				break;
   2484 			case WM_MEDIATYPE_SERDES:
   2485 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2486 				break;
   2487 			default:
   2488 				break;
   2489 			}
   2490 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2491 			break;
   2492 		case CTRL_EXT_LINK_MODE_GMII:
   2493 		default:
   2494 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2495 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2496 			break;
   2497 		}
   2498 
    2499 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2500 			reg |= CTRL_EXT_I2C_ENA;
    2501 		else
    2502 			reg &= ~CTRL_EXT_I2C_ENA;
   2504 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2505 
   2506 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2507 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2508 		else
   2509 			wm_tbi_mediainit(sc);
   2510 	} else if (sc->sc_type < WM_T_82543 ||
   2511 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2512 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2513 			aprint_error_dev(sc->sc_dev,
   2514 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2515 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2516 		}
   2517 		wm_tbi_mediainit(sc);
   2518 	} else {
   2519 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2520 			aprint_error_dev(sc->sc_dev,
   2521 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2522 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2523 		}
   2524 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2525 	}
   2526 
   2527 	ifp = &sc->sc_ethercom.ec_if;
   2528 	xname = device_xname(sc->sc_dev);
   2529 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2530 	ifp->if_softc = sc;
   2531 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2532 	ifp->if_extflags = IFEF_START_MPSAFE;
   2533 	ifp->if_ioctl = wm_ioctl;
   2534 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2535 		ifp->if_start = wm_nq_start;
   2536 		if (sc->sc_nqueues > 1) {
   2537 			ifp->if_transmit = wm_nq_transmit;
   2538 			deferred_start_func = wm_deferred_start;
   2539 		}
   2540 	} else {
   2541 		ifp->if_start = wm_start;
   2542 		if (sc->sc_nqueues > 1) {
   2543 			ifp->if_transmit = wm_transmit;
   2544 			deferred_start_func = wm_deferred_start;
   2545 		}
   2546 	}
   2547 	ifp->if_watchdog = wm_watchdog;
   2548 	ifp->if_init = wm_init;
   2549 	ifp->if_stop = wm_stop;
   2550 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2551 	IFQ_SET_READY(&ifp->if_snd);
   2552 
   2553 	/* Check for jumbo frame */
   2554 	switch (sc->sc_type) {
   2555 	case WM_T_82573:
   2556 		/* XXX limited to 9234 if ASPM is disabled */
   2557 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2558 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2559 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2560 		break;
   2561 	case WM_T_82571:
   2562 	case WM_T_82572:
   2563 	case WM_T_82574:
   2564 	case WM_T_82575:
   2565 	case WM_T_82576:
   2566 	case WM_T_82580:
   2567 	case WM_T_I350:
    2568 	case WM_T_I354: /* XXX ok? */
   2569 	case WM_T_I210:
   2570 	case WM_T_I211:
   2571 	case WM_T_80003:
   2572 	case WM_T_ICH9:
   2573 	case WM_T_ICH10:
   2574 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2575 	case WM_T_PCH_LPT:
   2576 	case WM_T_PCH_SPT:
   2577 		/* XXX limited to 9234 */
   2578 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2579 		break;
   2580 	case WM_T_PCH:
   2581 		/* XXX limited to 4096 */
   2582 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2583 		break;
   2584 	case WM_T_82542_2_0:
   2585 	case WM_T_82542_2_1:
   2586 	case WM_T_82583:
   2587 	case WM_T_ICH8:
   2588 		/* No support for jumbo frame */
   2589 		break;
   2590 	default:
   2591 		/* ETHER_MAX_LEN_JUMBO */
   2592 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2593 		break;
   2594 	}
   2595 
    2596 	/* If we're an i82543 or greater, we can support VLANs. */
   2597 	if (sc->sc_type >= WM_T_82543)
   2598 		sc->sc_ethercom.ec_capabilities |=
   2599 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2600 
   2601 	/*
    2602 	 * We can perform TCPv4 and UDPv4 checksums inbound.  Only
   2603 	 * on i82543 and later.
   2604 	 */
   2605 	if (sc->sc_type >= WM_T_82543) {
   2606 		ifp->if_capabilities |=
   2607 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2608 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2609 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2610 		    IFCAP_CSUM_TCPv6_Tx |
   2611 		    IFCAP_CSUM_UDPv6_Tx;
   2612 	}
   2613 
   2614 	/*
   2615 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2616 	 *
   2617 	 *	82541GI (8086:1076) ... no
   2618 	 *	82572EI (8086:10b9) ... yes
   2619 	 */
   2620 	if (sc->sc_type >= WM_T_82571) {
   2621 		ifp->if_capabilities |=
   2622 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2623 	}
   2624 
   2625 	/*
    2626 	 * If we're an i82544 or greater (except i82547), we can do
   2627 	 * TCP segmentation offload.
   2628 	 */
   2629 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2630 		ifp->if_capabilities |= IFCAP_TSOv4;
   2631 	}
   2632 
   2633 	if (sc->sc_type >= WM_T_82571) {
   2634 		ifp->if_capabilities |= IFCAP_TSOv6;
   2635 	}
   2636 
   2637 #ifdef WM_MPSAFE
   2638 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2639 #else
   2640 	sc->sc_core_lock = NULL;
   2641 #endif
   2642 
   2643 	/* Attach the interface. */
   2644 	if_initialize(ifp);
   2645 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2646 	if_deferred_start_init(ifp, deferred_start_func);
   2647 	ether_ifattach(ifp, enaddr);
   2648 	if_register(ifp);
   2649 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2650 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2651 			  RND_FLAG_DEFAULT);
   2652 
   2653 #ifdef WM_EVENT_COUNTERS
   2654 	/* Attach event counters. */
   2655 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2656 	    NULL, xname, "linkintr");
   2657 
   2658 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2659 	    NULL, xname, "tx_xoff");
   2660 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2661 	    NULL, xname, "tx_xon");
   2662 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2663 	    NULL, xname, "rx_xoff");
   2664 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2665 	    NULL, xname, "rx_xon");
   2666 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2667 	    NULL, xname, "rx_macctl");
   2668 #endif /* WM_EVENT_COUNTERS */
   2669 
   2670 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2671 		pmf_class_network_register(self, ifp);
   2672 	else
   2673 		aprint_error_dev(self, "couldn't establish power handler\n");
   2674 
   2675 	sc->sc_flags |= WM_F_ATTACHED;
   2676  out:
   2677 	return;
   2678 }
   2679 
   2680 /* The detach function (ca_detach) */
   2681 static int
   2682 wm_detach(device_t self, int flags __unused)
   2683 {
   2684 	struct wm_softc *sc = device_private(self);
   2685 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2686 	int i;
   2687 
   2688 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2689 		return 0;
   2690 
   2691 	/* Stop the interface. Callouts are stopped in it. */
   2692 	wm_stop(ifp, 1);
   2693 
   2694 	pmf_device_deregister(self);
   2695 
   2696 	/* Tell the firmware about the release */
   2697 	WM_CORE_LOCK(sc);
   2698 	wm_release_manageability(sc);
   2699 	wm_release_hw_control(sc);
   2700 	wm_enable_wakeup(sc);
   2701 	WM_CORE_UNLOCK(sc);
   2702 
   2703 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2704 
   2705 	/* Delete all remaining media. */
   2706 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2707 
   2708 	ether_ifdetach(ifp);
   2709 	if_detach(ifp);
   2710 	if_percpuq_destroy(sc->sc_ipq);
   2711 
   2712 	/* Unload RX dmamaps and free mbufs */
   2713 	for (i = 0; i < sc->sc_nqueues; i++) {
   2714 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2715 		mutex_enter(rxq->rxq_lock);
   2716 		wm_rxdrain(rxq);
   2717 		mutex_exit(rxq->rxq_lock);
   2718 	}
   2719 	/* Must unlock here */
   2720 
   2721 	/* Disestablish the interrupt handler */
   2722 	for (i = 0; i < sc->sc_nintrs; i++) {
   2723 		if (sc->sc_ihs[i] != NULL) {
   2724 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2725 			sc->sc_ihs[i] = NULL;
   2726 		}
   2727 	}
   2728 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2729 
   2730 	wm_free_txrx_queues(sc);
   2731 
   2732 	/* Unmap the registers */
   2733 	if (sc->sc_ss) {
   2734 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2735 		sc->sc_ss = 0;
   2736 	}
   2737 	if (sc->sc_ios) {
   2738 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2739 		sc->sc_ios = 0;
   2740 	}
   2741 	if (sc->sc_flashs) {
   2742 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2743 		sc->sc_flashs = 0;
   2744 	}
   2745 
   2746 	if (sc->sc_core_lock)
   2747 		mutex_obj_free(sc->sc_core_lock);
   2748 	if (sc->sc_ich_phymtx)
   2749 		mutex_obj_free(sc->sc_ich_phymtx);
   2750 	if (sc->sc_ich_nvmmtx)
   2751 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2752 
   2753 	return 0;
   2754 }
   2755 
   2756 static bool
   2757 wm_suspend(device_t self, const pmf_qual_t *qual)
   2758 {
   2759 	struct wm_softc *sc = device_private(self);
   2760 
   2761 	wm_release_manageability(sc);
   2762 	wm_release_hw_control(sc);
   2763 	wm_enable_wakeup(sc);
   2764 
   2765 	return true;
   2766 }
   2767 
   2768 static bool
   2769 wm_resume(device_t self, const pmf_qual_t *qual)
   2770 {
   2771 	struct wm_softc *sc = device_private(self);
   2772 
   2773 	wm_init_manageability(sc);
   2774 
   2775 	return true;
   2776 }
   2777 
   2778 /*
   2779  * wm_watchdog:		[ifnet interface function]
   2780  *
   2781  *	Watchdog timer handler.
   2782  */
   2783 static void
   2784 wm_watchdog(struct ifnet *ifp)
   2785 {
   2786 	int qid;
   2787 	struct wm_softc *sc = ifp->if_softc;
   2788 
   2789 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2790 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2791 
   2792 		wm_watchdog_txq(ifp, txq);
   2793 	}
   2794 
   2795 	/* Reset the interface. */
   2796 	(void) wm_init(ifp);
   2797 
   2798 	/*
    2799 	 * Some upper-layer processing, e.g. ALTQ, still calls
    2800 	 * ifp->if_start() directly.
   2801 	 */
   2802 	/* Try to get more packets going. */
   2803 	ifp->if_start(ifp);
   2804 }
   2805 
   2806 static void
   2807 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2808 {
   2809 	struct wm_softc *sc = ifp->if_softc;
   2810 
   2811 	/*
   2812 	 * Since we're using delayed interrupts, sweep up
   2813 	 * before we report an error.
   2814 	 */
   2815 	mutex_enter(txq->txq_lock);
   2816 	wm_txeof(sc, txq);
   2817 	mutex_exit(txq->txq_lock);
   2818 
   2819 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2820 #ifdef WM_DEBUG
   2821 		int i, j;
   2822 		struct wm_txsoft *txs;
   2823 #endif
   2824 		log(LOG_ERR,
   2825 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2826 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2827 		    txq->txq_next);
   2828 		ifp->if_oerrors++;
   2829 #ifdef WM_DEBUG
		for (i = txq->txq_sdirty; i != txq->txq_snext;
		    i = WM_NEXTTXS(txq, i)) {
			txs = &txq->txq_soft[i];
			printf("txs %d tx %d -> %d\n",
			    i, txs->txs_firstdesc, txs->txs_lastdesc);
			for (j = txs->txs_firstdesc; ;
			    j = WM_NEXTTX(txq, j)) {
				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
				printf("\t %#08x%08x\n",
				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
				if (j == txs->txs_lastdesc)
					break;
			}
		}
   2846 #endif
   2847 	}
   2848 }
   2849 
   2850 /*
   2851  * wm_tick:
   2852  *
   2853  *	One second timer, used to check link status, sweep up
   2854  *	completed transmit jobs, etc.
   2855  */
   2856 static void
   2857 wm_tick(void *arg)
   2858 {
   2859 	struct wm_softc *sc = arg;
   2860 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2861 #ifndef WM_MPSAFE
   2862 	int s = splnet();
   2863 #endif
   2864 
   2865 	WM_CORE_LOCK(sc);
   2866 
   2867 	if (sc->sc_core_stopping)
   2868 		goto out;
   2869 
   2870 	if (sc->sc_type >= WM_T_82542_2_1) {
   2871 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2872 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2873 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2874 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2875 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2876 	}
   2877 
   2878 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += 0ULL /* ensure quad_t */
	    + CSR_READ(sc, WMREG_CRCERRS)
   2881 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2882 	    + CSR_READ(sc, WMREG_SYMERRC)
   2883 	    + CSR_READ(sc, WMREG_RXERRC)
   2884 	    + CSR_READ(sc, WMREG_SEC)
   2885 	    + CSR_READ(sc, WMREG_CEXTERR)
   2886 	    + CSR_READ(sc, WMREG_RLEC);
	/*
	 * WMREG_RNBC is incremented when there are no available buffers
	 * in host memory.  It is not the number of dropped packets,
	 * because the Ethernet controller can still receive packets in
	 * that state as long as there is space in the PHY's FIFO.
	 *
	 * To count WMREG_RNBC, use a dedicated EVCNT instead of
	 * if_iqdrops.
	 */
   2896 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2897 
   2898 	if (sc->sc_flags & WM_F_HAS_MII)
   2899 		mii_tick(&sc->sc_mii);
   2900 	else if ((sc->sc_type >= WM_T_82575)
   2901 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2902 		wm_serdes_tick(sc);
   2903 	else
   2904 		wm_tbi_tick(sc);
   2905 
   2906 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2907 out:
   2908 	WM_CORE_UNLOCK(sc);
   2909 #ifndef WM_MPSAFE
   2910 	splx(s);
   2911 #endif
   2912 }
   2913 
   2914 static int
   2915 wm_ifflags_cb(struct ethercom *ec)
   2916 {
   2917 	struct ifnet *ifp = &ec->ec_if;
   2918 	struct wm_softc *sc = ifp->if_softc;
   2919 	int rc = 0;
   2920 
   2921 	WM_CORE_LOCK(sc);
   2922 
   2923 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2924 	sc->sc_if_flags = ifp->if_flags;
   2925 
   2926 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2927 		rc = ENETRESET;
   2928 		goto out;
   2929 	}
   2930 
   2931 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2932 		wm_set_filter(sc);
   2933 
   2934 	wm_set_vlan(sc);
   2935 
   2936 out:
   2937 	WM_CORE_UNLOCK(sc);
   2938 
   2939 	return rc;
   2940 }
   2941 
   2942 /*
   2943  * wm_ioctl:		[ifnet interface function]
   2944  *
   2945  *	Handle control requests from the operator.
   2946  */
   2947 static int
   2948 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2949 {
   2950 	struct wm_softc *sc = ifp->if_softc;
   2951 	struct ifreq *ifr = (struct ifreq *) data;
   2952 	struct ifaddr *ifa = (struct ifaddr *)data;
   2953 	struct sockaddr_dl *sdl;
   2954 	int s, error;
   2955 
   2956 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2957 		device_xname(sc->sc_dev), __func__));
   2958 
   2959 #ifndef WM_MPSAFE
   2960 	s = splnet();
   2961 #endif
   2962 	switch (cmd) {
   2963 	case SIOCSIFMEDIA:
   2964 	case SIOCGIFMEDIA:
   2965 		WM_CORE_LOCK(sc);
   2966 		/* Flow control requires full-duplex mode. */
   2967 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2968 		    (ifr->ifr_media & IFM_FDX) == 0)
   2969 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2970 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2971 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2972 				/* We can do both TXPAUSE and RXPAUSE. */
   2973 				ifr->ifr_media |=
   2974 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2975 			}
   2976 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2977 		}
   2978 		WM_CORE_UNLOCK(sc);
   2979 #ifdef WM_MPSAFE
   2980 		s = splnet();
   2981 #endif
   2982 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2983 #ifdef WM_MPSAFE
   2984 		splx(s);
   2985 #endif
   2986 		break;
   2987 	case SIOCINITIFADDR:
   2988 		WM_CORE_LOCK(sc);
   2989 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2990 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2991 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2992 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2993 			/* unicast address is first multicast entry */
   2994 			wm_set_filter(sc);
   2995 			error = 0;
   2996 			WM_CORE_UNLOCK(sc);
   2997 			break;
   2998 		}
   2999 		WM_CORE_UNLOCK(sc);
   3000 		/*FALLTHROUGH*/
   3001 	default:
   3002 #ifdef WM_MPSAFE
   3003 		s = splnet();
   3004 #endif
		/* ether_ioctl() may call wm_start(), so call it unlocked. */
   3006 		error = ether_ioctl(ifp, cmd, data);
   3007 #ifdef WM_MPSAFE
   3008 		splx(s);
   3009 #endif
   3010 		if (error != ENETRESET)
   3011 			break;
   3012 
   3013 		error = 0;
   3014 
   3015 		if (cmd == SIOCSIFCAP) {
   3016 			error = (*ifp->if_init)(ifp);
   3017 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3018 			;
   3019 		else if (ifp->if_flags & IFF_RUNNING) {
   3020 			/*
   3021 			 * Multicast list has changed; set the hardware filter
   3022 			 * accordingly.
   3023 			 */
   3024 			WM_CORE_LOCK(sc);
   3025 			wm_set_filter(sc);
   3026 			WM_CORE_UNLOCK(sc);
   3027 		}
   3028 		break;
   3029 	}
   3030 
   3031 #ifndef WM_MPSAFE
   3032 	splx(s);
   3033 #endif
   3034 	return error;
   3035 }
   3036 
   3037 /* MAC address related */
   3038 
   3039 /*
 * Get the offset of the MAC address and return it.
 * If an error occurs, offset 0 is used.
   3042  */
   3043 static uint16_t
   3044 wm_check_alt_mac_addr(struct wm_softc *sc)
   3045 {
   3046 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3047 	uint16_t offset = NVM_OFF_MACADDR;
   3048 
   3049 	/* Try to read alternative MAC address pointer */
   3050 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3051 		return 0;
   3052 
	/* Check whether the pointer is valid. */
   3054 	if ((offset == 0x0000) || (offset == 0xffff))
   3055 		return 0;
   3056 
   3057 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3058 	/*
	 * Check whether the alternative MAC address is valid.
	 * Some cards have a non-0xffff pointer but do not actually
	 * use an alternative MAC address.
	 *
	 * A valid unicast address must have the group (multicast)
	 * bit clear.
   3064 	 */
   3065 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3066 		if (((myea[0] & 0xff) & 0x01) == 0)
   3067 			return offset; /* Found */
   3068 
   3069 	/* Not found */
   3070 	return 0;
   3071 }
   3072 
   3073 static int
   3074 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3075 {
   3076 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3077 	uint16_t offset = NVM_OFF_MACADDR;
   3078 	int do_invert = 0;
   3079 
   3080 	switch (sc->sc_type) {
   3081 	case WM_T_82580:
   3082 	case WM_T_I350:
   3083 	case WM_T_I354:
   3084 		/* EEPROM Top Level Partitioning */
   3085 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3086 		break;
   3087 	case WM_T_82571:
   3088 	case WM_T_82575:
   3089 	case WM_T_82576:
   3090 	case WM_T_80003:
   3091 	case WM_T_I210:
   3092 	case WM_T_I211:
   3093 		offset = wm_check_alt_mac_addr(sc);
   3094 		if (offset == 0)
   3095 			if ((sc->sc_funcid & 0x01) == 1)
   3096 				do_invert = 1;
   3097 		break;
   3098 	default:
   3099 		if ((sc->sc_funcid & 0x01) == 1)
   3100 			do_invert = 1;
   3101 		break;
   3102 	}
   3103 
   3104 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3105 		goto bad;
   3106 
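	/*
	 * Each 16-bit NVM word holds two MAC address bytes, low byte
	 * first: e.g. myea[0] == 0x2301 yields enaddr[0] == 0x01 and
	 * enaddr[1] == 0x23.
	 */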
   3107 	enaddr[0] = myea[0] & 0xff;
   3108 	enaddr[1] = myea[0] >> 8;
   3109 	enaddr[2] = myea[1] & 0xff;
   3110 	enaddr[3] = myea[1] >> 8;
   3111 	enaddr[4] = myea[2] & 0xff;
   3112 	enaddr[5] = myea[2] >> 8;
   3113 
   3114 	/*
   3115 	 * Toggle the LSB of the MAC address on the second port
   3116 	 * of some dual port cards.
   3117 	 */
   3118 	if (do_invert != 0)
   3119 		enaddr[5] ^= 1;
   3120 
   3121 	return 0;
   3122 
   3123  bad:
   3124 	return -1;
   3125 }
   3126 
   3127 /*
   3128  * wm_set_ral:
   3129  *
 *	Set an entry in the receive address list.
   3131  */
   3132 static void
   3133 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3134 {
   3135 	uint32_t ral_lo, ral_hi;
   3136 
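	/*
	 * RAL_LO holds MAC address bytes 0-3 and RAL_HI holds bytes
	 * 4-5; the RAL_AV bit in RAL_HI marks the entry as valid.
	 */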
   3137 	if (enaddr != NULL) {
   3138 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3139 		    (enaddr[3] << 24);
   3140 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3141 		ral_hi |= RAL_AV;
   3142 	} else {
   3143 		ral_lo = 0;
   3144 		ral_hi = 0;
   3145 	}
   3146 
   3147 	if (sc->sc_type >= WM_T_82544) {
   3148 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3149 		    ral_lo);
   3150 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3151 		    ral_hi);
   3152 	} else {
   3153 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3154 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3155 	}
   3156 }
   3157 
   3158 /*
   3159  * wm_mchash:
   3160  *
   3161  *	Compute the hash of the multicast address for the 4096-bit
   3162  *	multicast filter.
   3163  */
   3164 static uint32_t
   3165 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3166 {
   3167 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3168 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3169 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3170 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3171 	uint32_t hash;
   3172 
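	/*
	 * The hash is built from bits of the last two address bytes;
	 * the shift tables select which bits according to
	 * sc_mchash_type.  E.g. with sc_mchash_type 0 on non-ICH/PCH
	 * chips, hash = (enaddr[4] >> 4) | (enaddr[5] << 4), a 12-bit
	 * index into the 4096-bit filter.
	 */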
   3173 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3174 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3175 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3176 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3177 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3178 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3179 		return (hash & 0x3ff);
   3180 	}
   3181 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3182 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3183 
   3184 	return (hash & 0xfff);
   3185 }
   3186 
   3187 /*
   3188  * wm_set_filter:
   3189  *
   3190  *	Set up the receive filter.
   3191  */
   3192 static void
   3193 wm_set_filter(struct wm_softc *sc)
   3194 {
   3195 	struct ethercom *ec = &sc->sc_ethercom;
   3196 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3197 	struct ether_multi *enm;
   3198 	struct ether_multistep step;
   3199 	bus_addr_t mta_reg;
   3200 	uint32_t hash, reg, bit;
   3201 	int i, size, ralmax;
   3202 
   3203 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3204 		device_xname(sc->sc_dev), __func__));
   3205 
   3206 	if (sc->sc_type >= WM_T_82544)
   3207 		mta_reg = WMREG_CORDOVA_MTA;
   3208 	else
   3209 		mta_reg = WMREG_MTA;
   3210 
   3211 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3212 
   3213 	if (ifp->if_flags & IFF_BROADCAST)
   3214 		sc->sc_rctl |= RCTL_BAM;
   3215 	if (ifp->if_flags & IFF_PROMISC) {
   3216 		sc->sc_rctl |= RCTL_UPE;
   3217 		goto allmulti;
   3218 	}
   3219 
   3220 	/*
   3221 	 * Set the station address in the first RAL slot, and
   3222 	 * clear the remaining slots.
   3223 	 */
   3224 	if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3226 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3227 	    || (sc->sc_type == WM_T_PCH))
   3228 		size = WM_RAL_TABSIZE_ICH8;
   3229 	else if (sc->sc_type == WM_T_PCH2)
   3230 		size = WM_RAL_TABSIZE_PCH2;
	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3232 		size = WM_RAL_TABSIZE_PCH_LPT;
   3233 	else if (sc->sc_type == WM_T_82575)
   3234 		size = WM_RAL_TABSIZE_82575;
   3235 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3236 		size = WM_RAL_TABSIZE_82576;
   3237 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3238 		size = WM_RAL_TABSIZE_I350;
   3239 	else
   3240 		size = WM_RAL_TABSIZE;
   3241 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3242 
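	/*
	 * On PCH_LPT/PCH_SPT, FWSM_WLOCK_MAC reports how many receive
	 * address entries the firmware lets the driver modify:
	 * 0 = all entries, 1 = RAR[0] only, n = RAR[0] plus n SHRA
	 * entries.
	 */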
   3243 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3244 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3245 		switch (i) {
   3246 		case 0:
   3247 			/* We can use all entries */
   3248 			ralmax = size;
   3249 			break;
   3250 		case 1:
   3251 			/* Only RAR[0] */
   3252 			ralmax = 1;
   3253 			break;
   3254 		default:
   3255 			/* available SHRA + RAR[0] */
   3256 			ralmax = i + 1;
   3257 		}
   3258 	} else
   3259 		ralmax = size;
   3260 	for (i = 1; i < size; i++) {
   3261 		if (i < ralmax)
   3262 			wm_set_ral(sc, NULL, i);
   3263 	}
   3264 
   3265 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3266 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3267 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3268 	    || (sc->sc_type == WM_T_PCH_SPT))
   3269 		size = WM_ICH8_MC_TABSIZE;
   3270 	else
   3271 		size = WM_MC_TABSIZE;
   3272 	/* Clear out the multicast table. */
   3273 	for (i = 0; i < size; i++)
   3274 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3275 
   3276 	ETHER_LOCK(ec);
   3277 	ETHER_FIRST_MULTI(step, ec, enm);
   3278 	while (enm != NULL) {
   3279 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3280 			ETHER_UNLOCK(ec);
   3281 			/*
   3282 			 * We must listen to a range of multicast addresses.
   3283 			 * For now, just accept all multicasts, rather than
   3284 			 * trying to set only those filter bits needed to match
   3285 			 * the range.  (At this time, the only use of address
   3286 			 * ranges is for IP multicast routing, for which the
   3287 			 * range is big enough to require all bits set.)
   3288 			 */
   3289 			goto allmulti;
   3290 		}
   3291 
   3292 		hash = wm_mchash(sc, enm->enm_addrlo);
   3293 
   3294 		reg = (hash >> 5);
   3295 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3296 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3297 		    || (sc->sc_type == WM_T_PCH2)
   3298 		    || (sc->sc_type == WM_T_PCH_LPT)
   3299 		    || (sc->sc_type == WM_T_PCH_SPT))
   3300 			reg &= 0x1f;
   3301 		else
   3302 			reg &= 0x7f;
   3303 		bit = hash & 0x1f;
   3304 
   3305 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3306 		hash |= 1U << bit;
   3307 
   3308 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3309 			/*
			 * 82544 Errata 9: Certain registers cannot be written
   3311 			 * with particular alignments in PCI-X bus operation
   3312 			 * (FCAH, MTA and VFTA).
   3313 			 */
   3314 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3315 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3316 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3317 		} else
   3318 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3319 
   3320 		ETHER_NEXT_MULTI(step, enm);
   3321 	}
   3322 	ETHER_UNLOCK(ec);
   3323 
   3324 	ifp->if_flags &= ~IFF_ALLMULTI;
   3325 	goto setit;
   3326 
   3327  allmulti:
   3328 	ifp->if_flags |= IFF_ALLMULTI;
   3329 	sc->sc_rctl |= RCTL_MPE;
   3330 
   3331  setit:
   3332 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3333 }
   3334 
   3335 /* Reset and init related */
   3336 
   3337 static void
   3338 wm_set_vlan(struct wm_softc *sc)
   3339 {
   3340 
   3341 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3342 		device_xname(sc->sc_dev), __func__));
   3343 
   3344 	/* Deal with VLAN enables. */
   3345 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3346 		sc->sc_ctrl |= CTRL_VME;
   3347 	else
   3348 		sc->sc_ctrl &= ~CTRL_VME;
   3349 
   3350 	/* Write the control registers. */
   3351 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3352 }
   3353 
   3354 static void
   3355 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3356 {
   3357 	uint32_t gcr;
   3358 	pcireg_t ctrl2;
   3359 
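	/*
	 * If the PCIe completion timeout is still at its default of 0,
	 * set it to 10ms via GCR on capability version 1 devices, or
	 * to 16ms via the PCIe DCSR2 register on capability version 2
	 * devices.
	 */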
   3360 	gcr = CSR_READ(sc, WMREG_GCR);
   3361 
   3362 	/* Only take action if timeout value is defaulted to 0 */
   3363 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3364 		goto out;
   3365 
   3366 	if ((gcr & GCR_CAP_VER2) == 0) {
   3367 		gcr |= GCR_CMPL_TMOUT_10MS;
   3368 		goto out;
   3369 	}
   3370 
   3371 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3372 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3373 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3374 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3375 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3376 
   3377 out:
   3378 	/* Disable completion timeout resend */
   3379 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3380 
   3381 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3382 }
   3383 
   3384 void
   3385 wm_get_auto_rd_done(struct wm_softc *sc)
   3386 {
   3387 	int i;
   3388 
	/* Wait for eeprom to reload */
   3390 	switch (sc->sc_type) {
   3391 	case WM_T_82571:
   3392 	case WM_T_82572:
   3393 	case WM_T_82573:
   3394 	case WM_T_82574:
   3395 	case WM_T_82583:
   3396 	case WM_T_82575:
   3397 	case WM_T_82576:
   3398 	case WM_T_82580:
   3399 	case WM_T_I350:
   3400 	case WM_T_I354:
   3401 	case WM_T_I210:
   3402 	case WM_T_I211:
   3403 	case WM_T_80003:
   3404 	case WM_T_ICH8:
   3405 	case WM_T_ICH9:
   3406 		for (i = 0; i < 10; i++) {
   3407 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3408 				break;
   3409 			delay(1000);
   3410 		}
   3411 		if (i == 10) {
   3412 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3413 			    "complete\n", device_xname(sc->sc_dev));
   3414 		}
   3415 		break;
   3416 	default:
   3417 		break;
   3418 	}
   3419 }
   3420 
   3421 void
   3422 wm_lan_init_done(struct wm_softc *sc)
   3423 {
   3424 	uint32_t reg = 0;
   3425 	int i;
   3426 
   3427 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3428 		device_xname(sc->sc_dev), __func__));
   3429 
   3430 	/* Wait for eeprom to reload */
   3431 	switch (sc->sc_type) {
   3432 	case WM_T_ICH10:
   3433 	case WM_T_PCH:
   3434 	case WM_T_PCH2:
   3435 	case WM_T_PCH_LPT:
   3436 	case WM_T_PCH_SPT:
   3437 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3438 			reg = CSR_READ(sc, WMREG_STATUS);
   3439 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3440 				break;
   3441 			delay(100);
   3442 		}
   3443 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3444 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3445 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3446 		}
   3447 		break;
   3448 	default:
   3449 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3450 		    __func__);
   3451 		break;
   3452 	}
   3453 
   3454 	reg &= ~STATUS_LAN_INIT_DONE;
   3455 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3456 }
   3457 
   3458 void
   3459 wm_get_cfg_done(struct wm_softc *sc)
   3460 {
   3461 	int mask;
   3462 	uint32_t reg;
   3463 	int i;
   3464 
   3465 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3466 		device_xname(sc->sc_dev), __func__));
   3467 
   3468 	/* Wait for eeprom to reload */
   3469 	switch (sc->sc_type) {
   3470 	case WM_T_82542_2_0:
   3471 	case WM_T_82542_2_1:
   3472 		/* null */
   3473 		break;
   3474 	case WM_T_82543:
   3475 	case WM_T_82544:
   3476 	case WM_T_82540:
   3477 	case WM_T_82545:
   3478 	case WM_T_82545_3:
   3479 	case WM_T_82546:
   3480 	case WM_T_82546_3:
   3481 	case WM_T_82541:
   3482 	case WM_T_82541_2:
   3483 	case WM_T_82547:
   3484 	case WM_T_82547_2:
   3485 	case WM_T_82573:
   3486 	case WM_T_82574:
   3487 	case WM_T_82583:
   3488 		/* generic */
   3489 		delay(10*1000);
   3490 		break;
   3491 	case WM_T_80003:
   3492 	case WM_T_82571:
   3493 	case WM_T_82572:
   3494 	case WM_T_82575:
   3495 	case WM_T_82576:
   3496 	case WM_T_82580:
   3497 	case WM_T_I350:
   3498 	case WM_T_I354:
   3499 	case WM_T_I210:
   3500 	case WM_T_I211:
   3501 		if (sc->sc_type == WM_T_82571) {
   3502 			/* Only 82571 shares port 0 */
   3503 			mask = EEMNGCTL_CFGDONE_0;
   3504 		} else
   3505 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3506 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3507 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3508 				break;
   3509 			delay(1000);
   3510 		}
   3511 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3512 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3513 				device_xname(sc->sc_dev), __func__));
   3514 		}
   3515 		break;
   3516 	case WM_T_ICH8:
   3517 	case WM_T_ICH9:
   3518 	case WM_T_ICH10:
   3519 	case WM_T_PCH:
   3520 	case WM_T_PCH2:
   3521 	case WM_T_PCH_LPT:
   3522 	case WM_T_PCH_SPT:
   3523 		delay(10*1000);
   3524 		if (sc->sc_type >= WM_T_ICH10)
   3525 			wm_lan_init_done(sc);
   3526 		else
   3527 			wm_get_auto_rd_done(sc);
   3528 
   3529 		reg = CSR_READ(sc, WMREG_STATUS);
   3530 		if ((reg & STATUS_PHYRA) != 0)
   3531 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3532 		break;
   3533 	default:
   3534 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3535 		    __func__);
   3536 		break;
   3537 	}
   3538 }
   3539 
   3540 /* Init hardware bits */
   3541 void
   3542 wm_initialize_hardware_bits(struct wm_softc *sc)
   3543 {
   3544 	uint32_t tarc0, tarc1, reg;
   3545 
   3546 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3547 		device_xname(sc->sc_dev), __func__));
   3548 
   3549 	/* For 82571 variant, 80003 and ICHs */
   3550 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3551 	    || (sc->sc_type >= WM_T_80003)) {
   3552 
   3553 		/* Transmit Descriptor Control 0 */
   3554 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3555 		reg |= TXDCTL_COUNT_DESC;
   3556 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3557 
   3558 		/* Transmit Descriptor Control 1 */
   3559 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3560 		reg |= TXDCTL_COUNT_DESC;
   3561 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3562 
   3563 		/* TARC0 */
   3564 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3565 		switch (sc->sc_type) {
   3566 		case WM_T_82571:
   3567 		case WM_T_82572:
   3568 		case WM_T_82573:
   3569 		case WM_T_82574:
   3570 		case WM_T_82583:
   3571 		case WM_T_80003:
   3572 			/* Clear bits 30..27 */
   3573 			tarc0 &= ~__BITS(30, 27);
   3574 			break;
   3575 		default:
   3576 			break;
   3577 		}
   3578 
   3579 		switch (sc->sc_type) {
   3580 		case WM_T_82571:
   3581 		case WM_T_82572:
   3582 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3583 
   3584 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3585 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3586 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3587 			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3589 
   3590 			/* TARC1 bit 28 */
   3591 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3592 				tarc1 &= ~__BIT(28);
   3593 			else
   3594 				tarc1 |= __BIT(28);
   3595 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3596 
   3597 			/*
   3598 			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
   3600 			 */
   3601 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3602 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3603 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3604 			break;
   3605 		case WM_T_82573:
   3606 		case WM_T_82574:
   3607 		case WM_T_82583:
   3608 			if ((sc->sc_type == WM_T_82574)
   3609 			    || (sc->sc_type == WM_T_82583))
   3610 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3611 
   3612 			/* Extended Device Control */
   3613 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3614 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3615 			reg |= __BIT(22);	/* Set bit 22 */
   3616 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3617 
   3618 			/* Device Control */
   3619 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3620 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3621 
   3622 			/* PCIe Control Register */
   3623 			/*
   3624 			 * 82573 Errata (unknown).
   3625 			 *
   3626 			 * 82574 Errata 25 and 82583 Errata 12
   3627 			 * "Dropped Rx Packets":
			 *   NVM image version 2.1.4 and newer do not
			 *   have this bug.
   3629 			 */
   3630 			reg = CSR_READ(sc, WMREG_GCR);
   3631 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3632 			CSR_WRITE(sc, WMREG_GCR, reg);
   3633 
   3634 			if ((sc->sc_type == WM_T_82574)
   3635 			    || (sc->sc_type == WM_T_82583)) {
   3636 				/*
   3637 				 * Document says this bit must be set for
   3638 				 * proper operation.
   3639 				 */
   3640 				reg = CSR_READ(sc, WMREG_GCR);
   3641 				reg |= __BIT(22);
   3642 				CSR_WRITE(sc, WMREG_GCR, reg);
   3643 
   3644 				/*
				 * Apply a workaround for a documented
				 * hardware erratum: unreliable PCIe
				 * completions can occur, particularly
				 * with ASPM enabled, and without the
				 * fix they can cause Tx timeouts.
   3651 				 */
   3652 				reg = CSR_READ(sc, WMREG_GCR2);
   3653 				reg |= __BIT(0);
   3654 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3655 			}
   3656 			break;
   3657 		case WM_T_80003:
   3658 			/* TARC0 */
   3659 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3660 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3662 
   3663 			/* TARC1 bit 28 */
   3664 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3665 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3666 				tarc1 &= ~__BIT(28);
   3667 			else
   3668 				tarc1 |= __BIT(28);
   3669 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3670 			break;
   3671 		case WM_T_ICH8:
   3672 		case WM_T_ICH9:
   3673 		case WM_T_ICH10:
   3674 		case WM_T_PCH:
   3675 		case WM_T_PCH2:
   3676 		case WM_T_PCH_LPT:
   3677 		case WM_T_PCH_SPT:
   3678 			/* TARC0 */
   3679 			if ((sc->sc_type == WM_T_ICH8)
   3680 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3681 				/* Set TARC0 bits 29 and 28 */
   3682 				tarc0 |= __BITS(29, 28);
   3683 			}
   3684 			/* Set TARC0 bits 23,24,26,27 */
   3685 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3686 
   3687 			/* CTRL_EXT */
   3688 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3689 			reg |= __BIT(22);	/* Set bit 22 */
   3690 			/*
   3691 			 * Enable PHY low-power state when MAC is at D3
   3692 			 * w/o WoL
   3693 			 */
   3694 			if (sc->sc_type >= WM_T_PCH)
   3695 				reg |= CTRL_EXT_PHYPDEN;
   3696 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3697 
   3698 			/* TARC1 */
   3699 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3700 			/* bit 28 */
   3701 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3702 				tarc1 &= ~__BIT(28);
   3703 			else
   3704 				tarc1 |= __BIT(28);
   3705 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3706 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3707 
   3708 			/* Device Status */
   3709 			if (sc->sc_type == WM_T_ICH8) {
   3710 				reg = CSR_READ(sc, WMREG_STATUS);
   3711 				reg &= ~__BIT(31);
   3712 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3713 
   3714 			}
   3715 
   3716 			/* IOSFPC */
   3717 			if (sc->sc_type == WM_T_PCH_SPT) {
   3718 				reg = CSR_READ(sc, WMREG_IOSFPC);
				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3720 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3721 			}
   3722 			/*
   3723 			 * Work-around descriptor data corruption issue during
   3724 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3725 			 * capability.
   3726 			 */
   3727 			reg = CSR_READ(sc, WMREG_RFCTL);
   3728 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3729 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3730 			break;
   3731 		default:
   3732 			break;
   3733 		}
   3734 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3735 
   3736 		switch (sc->sc_type) {
   3737 		/*
   3738 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3739 		 * Avoid RSS Hash Value bug.
   3740 		 */
   3741 		case WM_T_82571:
   3742 		case WM_T_82572:
   3743 		case WM_T_82573:
   3744 		case WM_T_80003:
   3745 		case WM_T_ICH8:
   3746 			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3748 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3749 			break;
   3750 		case WM_T_82574:
			/* Use the extended Rx descriptor. */
   3752 			reg = CSR_READ(sc, WMREG_RFCTL);
   3753 			reg |= WMREG_RFCTL_EXSTEN;
   3754 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3755 			break;
   3756 		default:
   3757 			break;
   3758 		}
   3759 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3760 		/*
   3761 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3762 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3763 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3764 		 * Correctly by the Device"
   3765 		 *
   3766 		 * I354(C2000) Errata AVR53:
   3767 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3768 		 * Hang"
   3769 		 */
   3770 		reg = CSR_READ(sc, WMREG_RFCTL);
   3771 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3772 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3773 	}
   3774 }
   3775 
   3776 static uint32_t
   3777 wm_rxpbs_adjust_82580(uint32_t val)
   3778 {
   3779 	uint32_t rv = 0;
   3780 
   3781 	if (val < __arraycount(wm_82580_rxpbs_table))
   3782 		rv = wm_82580_rxpbs_table[val];
   3783 
   3784 	return rv;
   3785 }
   3786 
   3787 /*
   3788  * wm_reset_phy:
   3789  *
   3790  *	generic PHY reset function.
   3791  *	Same as e1000_phy_hw_reset_generic()
   3792  */
   3793 static void
   3794 wm_reset_phy(struct wm_softc *sc)
   3795 {
   3796 	uint32_t reg;
   3797 
   3798 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3799 		device_xname(sc->sc_dev), __func__));
   3800 	if (wm_phy_resetisblocked(sc))
   3801 		return;
   3802 
   3803 	sc->phy.acquire(sc);
   3804 
   3805 	reg = CSR_READ(sc, WMREG_CTRL);
   3806 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3807 	CSR_WRITE_FLUSH(sc);
   3808 
   3809 	delay(sc->phy.reset_delay_us);
   3810 
   3811 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3812 	CSR_WRITE_FLUSH(sc);
   3813 
   3814 	delay(150);
   3815 
   3816 	sc->phy.release(sc);
   3817 
   3818 	wm_get_cfg_done(sc);
   3819 }
   3820 
   3821 static void
   3822 wm_flush_desc_rings(struct wm_softc *sc)
   3823 {
   3824 	pcireg_t preg;
   3825 	uint32_t reg;
   3826 	int nexttx;
   3827 
   3828 	/* First, disable MULR fix in FEXTNVM11 */
   3829 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3830 	reg |= FEXTNVM11_DIS_MULRFIX;
   3831 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3832 
   3833 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3834 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3835 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3836 		struct wm_txqueue *txq;
   3837 		wiseman_txdesc_t *txd;
   3838 
   3839 		/* TX */
   3840 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3841 		    device_xname(sc->sc_dev), preg, reg);
   3842 		reg = CSR_READ(sc, WMREG_TCTL);
   3843 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3844 
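		/*
		 * Queue a single dummy descriptor (IFCS, 512 bytes) and
		 * advance the tail pointer so the hardware can complete
		 * the descriptor ring flush requested above.
		 */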
   3845 		txq = &sc->sc_queue[0].wmq_txq;
   3846 		nexttx = txq->txq_next;
   3847 		txd = &txq->txq_descs[nexttx];
   3848 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3850 		txd->wtx_fields.wtxu_status = 0;
   3851 		txd->wtx_fields.wtxu_options = 0;
   3852 		txd->wtx_fields.wtxu_vlan = 0;
   3853 
   3854 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3855 			BUS_SPACE_BARRIER_WRITE);
   3856 
   3857 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3858 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3859 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3860 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3861 		delay(250);
   3862 	}
   3863 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3864 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3865 		uint32_t rctl;
   3866 
   3867 		/* RX */
   3868 		printf("%s: Need RX flush (reg = %08x)\n",
   3869 		    device_xname(sc->sc_dev), preg);
   3870 		rctl = CSR_READ(sc, WMREG_RCTL);
   3871 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3872 		CSR_WRITE_FLUSH(sc);
   3873 		delay(150);
   3874 
   3875 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3876 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3877 		reg &= 0xffffc000;
   3878 		/*
   3879 		 * update thresholds: prefetch threshold to 31, host threshold
   3880 		 * to 1 and make sure the granularity is "descriptors" and not
   3881 		 * "cache lines"
   3882 		 */
   3883 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3884 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3885 
   3886 		/*
   3887 		 * momentarily enable the RX ring for the changes to take
   3888 		 * effect
   3889 		 */
   3890 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3891 		CSR_WRITE_FLUSH(sc);
   3892 		delay(150);
   3893 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3894 	}
   3895 }
   3896 
   3897 /*
   3898  * wm_reset:
   3899  *
   3900  *	Reset the i82542 chip.
   3901  */
   3902 static void
   3903 wm_reset(struct wm_softc *sc)
   3904 {
   3905 	int phy_reset = 0;
   3906 	int i, error = 0;
   3907 	uint32_t reg;
   3908 
   3909 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3910 		device_xname(sc->sc_dev), __func__));
   3911 	KASSERT(sc->sc_type != 0);
   3912 
   3913 	/*
   3914 	 * Allocate on-chip memory according to the MTU size.
   3915 	 * The Packet Buffer Allocation register must be written
   3916 	 * before the chip is reset.
   3917 	 */
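	/*
	 * The PBA value is the receive packet buffer size in KB; the
	 * remainder of the on-chip packet buffer is used for transmit
	 * (e.g. on 82547, txq_fifo_size = PBA_40K - sc_pba below).
	 */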
   3918 	switch (sc->sc_type) {
   3919 	case WM_T_82547:
   3920 	case WM_T_82547_2:
   3921 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3922 		    PBA_22K : PBA_30K;
   3923 		for (i = 0; i < sc->sc_nqueues; i++) {
   3924 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3925 			txq->txq_fifo_head = 0;
   3926 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3927 			txq->txq_fifo_size =
   3928 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3929 			txq->txq_fifo_stall = 0;
   3930 		}
   3931 		break;
   3932 	case WM_T_82571:
   3933 	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3935 	case WM_T_80003:
   3936 		sc->sc_pba = PBA_32K;
   3937 		break;
   3938 	case WM_T_82573:
   3939 		sc->sc_pba = PBA_12K;
   3940 		break;
   3941 	case WM_T_82574:
   3942 	case WM_T_82583:
   3943 		sc->sc_pba = PBA_20K;
   3944 		break;
   3945 	case WM_T_82576:
   3946 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3947 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3948 		break;
   3949 	case WM_T_82580:
   3950 	case WM_T_I350:
   3951 	case WM_T_I354:
   3952 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3953 		break;
   3954 	case WM_T_I210:
   3955 	case WM_T_I211:
   3956 		sc->sc_pba = PBA_34K;
   3957 		break;
   3958 	case WM_T_ICH8:
   3959 		/* Workaround for a bit corruption issue in FIFO memory */
   3960 		sc->sc_pba = PBA_8K;
   3961 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3962 		break;
   3963 	case WM_T_ICH9:
   3964 	case WM_T_ICH10:
   3965 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3966 		    PBA_14K : PBA_10K;
   3967 		break;
   3968 	case WM_T_PCH:
   3969 	case WM_T_PCH2:
   3970 	case WM_T_PCH_LPT:
   3971 	case WM_T_PCH_SPT:
   3972 		sc->sc_pba = PBA_26K;
   3973 		break;
   3974 	default:
   3975 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3976 		    PBA_40K : PBA_48K;
   3977 		break;
   3978 	}
   3979 	/*
   3980 	 * Only old or non-multiqueue devices have the PBA register
   3981 	 * XXX Need special handling for 82575.
   3982 	 */
   3983 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3984 	    || (sc->sc_type == WM_T_82575))
   3985 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3986 
   3987 	/* Prevent the PCI-E bus from sticking */
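	/*
	 * Set GIO_M_DIS and poll STATUS_GIO_M_ENA (up to 800 * 100us =
	 * 80ms) so that in-flight PCIe master transactions complete
	 * before the reset.
	 */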
   3988 	if (sc->sc_flags & WM_F_PCIE) {
   3989 		int timeout = 800;
   3990 
   3991 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3992 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3993 
   3994 		while (timeout--) {
   3995 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3996 			    == 0)
   3997 				break;
   3998 			delay(100);
   3999 		}
   4000 	}
   4001 
   4002 	/* Set the completion timeout for interface */
   4003 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4004 	    || (sc->sc_type == WM_T_82580)
   4005 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4006 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4007 		wm_set_pcie_completion_timeout(sc);
   4008 
   4009 	/* Clear interrupt */
   4010 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4011 	if (sc->sc_nintrs > 1) {
   4012 		if (sc->sc_type != WM_T_82574) {
   4013 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4014 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4015 		} else {
   4016 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4017 		}
   4018 	}
   4019 
   4020 	/* Stop the transmit and receive processes. */
   4021 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4022 	sc->sc_rctl &= ~RCTL_EN;
   4023 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4024 	CSR_WRITE_FLUSH(sc);
   4025 
   4026 	/* XXX set_tbi_sbp_82543() */
   4027 
   4028 	delay(10*1000);
   4029 
   4030 	/* Must acquire the MDIO ownership before MAC reset */
   4031 	switch (sc->sc_type) {
   4032 	case WM_T_82573:
   4033 	case WM_T_82574:
   4034 	case WM_T_82583:
   4035 		error = wm_get_hw_semaphore_82573(sc);
   4036 		break;
   4037 	default:
   4038 		break;
   4039 	}
   4040 
   4041 	/*
   4042 	 * 82541 Errata 29? & 82547 Errata 28?
   4043 	 * See also the description about PHY_RST bit in CTRL register
   4044 	 * in 8254x_GBe_SDM.pdf.
   4045 	 */
   4046 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4047 		CSR_WRITE(sc, WMREG_CTRL,
   4048 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4049 		CSR_WRITE_FLUSH(sc);
   4050 		delay(5000);
   4051 	}
   4052 
   4053 	switch (sc->sc_type) {
   4054 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4055 	case WM_T_82541:
   4056 	case WM_T_82541_2:
   4057 	case WM_T_82547:
   4058 	case WM_T_82547_2:
   4059 		/*
   4060 		 * On some chipsets, a reset through a memory-mapped write
   4061 		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes a major headache that can be
   4063 		 * avoided by issuing the reset via indirect register writes
   4064 		 * through I/O space.
   4065 		 *
   4066 		 * So, if we successfully mapped the I/O BAR at attach time,
   4067 		 * use that.  Otherwise, try our luck with a memory-mapped
   4068 		 * reset.
   4069 		 */
   4070 		if (sc->sc_flags & WM_F_IOH_VALID)
   4071 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4072 		else
   4073 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4074 		break;
   4075 	case WM_T_82545_3:
   4076 	case WM_T_82546_3:
   4077 		/* Use the shadow control register on these chips. */
   4078 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4079 		break;
   4080 	case WM_T_80003:
   4081 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4082 		sc->phy.acquire(sc);
   4083 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4084 		sc->phy.release(sc);
   4085 		break;
   4086 	case WM_T_ICH8:
   4087 	case WM_T_ICH9:
   4088 	case WM_T_ICH10:
   4089 	case WM_T_PCH:
   4090 	case WM_T_PCH2:
   4091 	case WM_T_PCH_LPT:
   4092 	case WM_T_PCH_SPT:
   4093 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4094 		if (wm_phy_resetisblocked(sc) == false) {
   4095 			/*
   4096 			 * Gate automatic PHY configuration by hardware on
   4097 			 * non-managed 82579
   4098 			 */
   4099 			if ((sc->sc_type == WM_T_PCH2)
   4100 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4101 				== 0))
   4102 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4103 
   4104 			reg |= CTRL_PHY_RESET;
   4105 			phy_reset = 1;
   4106 		} else
			printf("%s: XXX reset is blocked!!!\n",
			    device_xname(sc->sc_dev));
   4108 		sc->phy.acquire(sc);
   4109 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier during reset. */
   4111 		delay(20*1000);
   4112 		mutex_exit(sc->sc_ich_phymtx);
   4113 		break;
   4114 	case WM_T_82580:
   4115 	case WM_T_I350:
   4116 	case WM_T_I354:
   4117 	case WM_T_I210:
   4118 	case WM_T_I211:
   4119 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4120 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4121 			CSR_WRITE_FLUSH(sc);
   4122 		delay(5000);
   4123 		break;
   4124 	case WM_T_82542_2_0:
   4125 	case WM_T_82542_2_1:
   4126 	case WM_T_82543:
   4127 	case WM_T_82540:
   4128 	case WM_T_82545:
   4129 	case WM_T_82546:
   4130 	case WM_T_82571:
   4131 	case WM_T_82572:
   4132 	case WM_T_82573:
   4133 	case WM_T_82574:
   4134 	case WM_T_82575:
   4135 	case WM_T_82576:
   4136 	case WM_T_82583:
   4137 	default:
   4138 		/* Everything else can safely use the documented method. */
   4139 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4140 		break;
   4141 	}
   4142 
   4143 	/* Must release the MDIO ownership after MAC reset */
   4144 	switch (sc->sc_type) {
   4145 	case WM_T_82573:
   4146 	case WM_T_82574:
   4147 	case WM_T_82583:
   4148 		if (error == 0)
   4149 			wm_put_hw_semaphore_82573(sc);
   4150 		break;
   4151 	default:
   4152 		break;
   4153 	}
   4154 
   4155 	if (phy_reset != 0)
   4156 		wm_get_cfg_done(sc);
   4157 
   4158 	/* reload EEPROM */
   4159 	switch (sc->sc_type) {
   4160 	case WM_T_82542_2_0:
   4161 	case WM_T_82542_2_1:
   4162 	case WM_T_82543:
   4163 	case WM_T_82544:
   4164 		delay(10);
   4165 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4166 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4167 		CSR_WRITE_FLUSH(sc);
   4168 		delay(2000);
   4169 		break;
   4170 	case WM_T_82540:
   4171 	case WM_T_82545:
   4172 	case WM_T_82545_3:
   4173 	case WM_T_82546:
   4174 	case WM_T_82546_3:
   4175 		delay(5*1000);
   4176 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4177 		break;
   4178 	case WM_T_82541:
   4179 	case WM_T_82541_2:
   4180 	case WM_T_82547:
   4181 	case WM_T_82547_2:
   4182 		delay(20000);
   4183 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4184 		break;
   4185 	case WM_T_82571:
   4186 	case WM_T_82572:
   4187 	case WM_T_82573:
   4188 	case WM_T_82574:
   4189 	case WM_T_82583:
   4190 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4191 			delay(10);
   4192 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4193 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4194 			CSR_WRITE_FLUSH(sc);
   4195 		}
   4196 		/* check EECD_EE_AUTORD */
   4197 		wm_get_auto_rd_done(sc);
   4198 		/*
   4199 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4200 		 * is set.
   4201 		 */
   4202 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4203 		    || (sc->sc_type == WM_T_82583))
   4204 			delay(25*1000);
   4205 		break;
   4206 	case WM_T_82575:
   4207 	case WM_T_82576:
   4208 	case WM_T_82580:
   4209 	case WM_T_I350:
   4210 	case WM_T_I354:
   4211 	case WM_T_I210:
   4212 	case WM_T_I211:
   4213 	case WM_T_80003:
   4214 		/* check EECD_EE_AUTORD */
   4215 		wm_get_auto_rd_done(sc);
   4216 		break;
   4217 	case WM_T_ICH8:
   4218 	case WM_T_ICH9:
   4219 	case WM_T_ICH10:
   4220 	case WM_T_PCH:
   4221 	case WM_T_PCH2:
   4222 	case WM_T_PCH_LPT:
   4223 	case WM_T_PCH_SPT:
   4224 		break;
   4225 	default:
   4226 		panic("%s: unknown type\n", __func__);
   4227 	}
   4228 
   4229 	/* Check whether EEPROM is present or not */
   4230 	switch (sc->sc_type) {
   4231 	case WM_T_82575:
   4232 	case WM_T_82576:
   4233 	case WM_T_82580:
   4234 	case WM_T_I350:
   4235 	case WM_T_I354:
   4236 	case WM_T_ICH8:
   4237 	case WM_T_ICH9:
   4238 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4239 			/* Not found */
   4240 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4241 			if (sc->sc_type == WM_T_82575)
   4242 				wm_reset_init_script_82575(sc);
   4243 		}
   4244 		break;
   4245 	default:
   4246 		break;
   4247 	}
   4248 
   4249 	if ((sc->sc_type == WM_T_82580)
   4250 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4251 		/* clear global device reset status bit */
   4252 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4253 	}
   4254 
   4255 	/* Clear any pending interrupt events. */
   4256 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4257 	reg = CSR_READ(sc, WMREG_ICR);
   4258 	if (sc->sc_nintrs > 1) {
   4259 		if (sc->sc_type != WM_T_82574) {
   4260 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4261 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4262 		} else
   4263 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4264 	}
   4265 
   4266 	/* reload sc_ctrl */
   4267 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4268 
   4269 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4270 		wm_set_eee_i350(sc);
   4271 
   4272 	/* Clear the host wakeup bit after lcd reset */
   4273 	if (sc->sc_type >= WM_T_PCH) {
   4274 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4275 		    BM_PORT_GEN_CFG);
   4276 		reg &= ~BM_WUC_HOST_WU_BIT;
   4277 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4278 		    BM_PORT_GEN_CFG, reg);
   4279 	}
   4280 
   4281 	/*
   4282 	 * For PCH, this write will make sure that any noise will be detected
   4283 	 * as a CRC error and be dropped rather than show up as a bad packet
   4284 	 * to the DMA engine
   4285 	 */
   4286 	if (sc->sc_type == WM_T_PCH)
   4287 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4288 
   4289 	if (sc->sc_type >= WM_T_82544)
   4290 		CSR_WRITE(sc, WMREG_WUC, 0);
   4291 
   4292 	wm_reset_mdicnfg_82580(sc);
   4293 
   4294 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4295 		wm_pll_workaround_i210(sc);
   4296 }
   4297 
   4298 /*
   4299  * wm_add_rxbuf:
   4300  *
 *	Add a receive buffer to the indicated descriptor.
   4302  */
   4303 static int
   4304 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4305 {
   4306 	struct wm_softc *sc = rxq->rxq_sc;
   4307 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4308 	struct mbuf *m;
   4309 	int error;
   4310 
   4311 	KASSERT(mutex_owned(rxq->rxq_lock));
   4312 
   4313 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4314 	if (m == NULL)
   4315 		return ENOBUFS;
   4316 
   4317 	MCLGET(m, M_DONTWAIT);
   4318 	if ((m->m_flags & M_EXT) == 0) {
   4319 		m_freem(m);
   4320 		return ENOBUFS;
   4321 	}
   4322 
   4323 	if (rxs->rxs_mbuf != NULL)
   4324 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4325 
   4326 	rxs->rxs_mbuf = m;
   4327 
   4328 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4329 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4330 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4331 	if (error) {
   4332 		/* XXX XXX XXX */
   4333 		aprint_error_dev(sc->sc_dev,
   4334 		    "unable to load rx DMA map %d, error = %d\n",
   4335 		    idx, error);
   4336 		panic("wm_add_rxbuf");
   4337 	}
   4338 
   4339 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4340 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4341 
   4342 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4343 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4344 			wm_init_rxdesc(rxq, idx);
   4345 	} else
   4346 		wm_init_rxdesc(rxq, idx);
   4347 
   4348 	return 0;
   4349 }
   4350 
   4351 /*
   4352  * wm_rxdrain:
   4353  *
   4354  *	Drain the receive queue.
   4355  */
   4356 static void
   4357 wm_rxdrain(struct wm_rxqueue *rxq)
   4358 {
   4359 	struct wm_softc *sc = rxq->rxq_sc;
   4360 	struct wm_rxsoft *rxs;
   4361 	int i;
   4362 
   4363 	KASSERT(mutex_owned(rxq->rxq_lock));
   4364 
   4365 	for (i = 0; i < WM_NRXDESC; i++) {
   4366 		rxs = &rxq->rxq_soft[i];
   4367 		if (rxs->rxs_mbuf != NULL) {
   4368 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4369 			m_freem(rxs->rxs_mbuf);
   4370 			rxs->rxs_mbuf = NULL;
   4371 		}
   4372 	}
   4373 }
   4374 
   4375 
   4376 /*
   4377  * XXX copy from FreeBSD's sys/net/rss_config.c
   4378  */
   4379 /*
   4380  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4381  * effectiveness may be limited by algorithm choice and available entropy
   4382  * during the boot.
   4383  *
   4384  * XXXRW: And that we don't randomize it yet!
   4385  *
   4386  * This is the default Microsoft RSS specification key which is also
   4387  * the Chelsio T5 firmware default key.
   4388  */
   4389 #define RSS_KEYSIZE 40
   4390 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4391 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4392 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4393 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4394 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4395 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4396 };
   4397 
   4398 /*
 * The caller must pass an array of sizeof(wm_rss_key) bytes.
 *
 * XXX
 * Since if_ixgbe may also use this function, it should not be an
 * if_wm-specific function.
   4404  */
   4405 static void
   4406 wm_rss_getkey(uint8_t *key)
   4407 {
   4408 
   4409 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4410 }
   4411 
   4412 /*
 * Set up registers for RSS.
 *
 * XXX VMDq is not yet supported.
   4416  */
   4417 static void
   4418 wm_init_rss(struct wm_softc *sc)
   4419 {
   4420 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4421 	int i;
   4422 
   4423 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4424 
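	/*
	 * Fill the redirection table (RETA) round-robin over the active
	 * queues; the hardware uses the low bits of the RSS (Toeplitz)
	 * hash to index this table and pick the destination RX queue.
	 */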
   4425 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4426 		int qid, reta_ent;
   4427 
   4428 		qid  = i % sc->sc_nqueues;
		switch (sc->sc_type) {
   4430 		case WM_T_82574:
   4431 			reta_ent = __SHIFTIN(qid,
   4432 			    RETA_ENT_QINDEX_MASK_82574);
   4433 			break;
   4434 		case WM_T_82575:
   4435 			reta_ent = __SHIFTIN(qid,
   4436 			    RETA_ENT_QINDEX1_MASK_82575);
   4437 			break;
   4438 		default:
   4439 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4440 			break;
   4441 		}
   4442 
   4443 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4444 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4445 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4446 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4447 	}
   4448 
   4449 	wm_rss_getkey((uint8_t *)rss_key);
   4450 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4451 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4452 
   4453 	if (sc->sc_type == WM_T_82574)
   4454 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4455 	else
   4456 		mrqc = MRQC_ENABLE_RSS_MQ;
   4457 
   4458 	/*
   4459 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4460 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4461 	 */
   4462 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4463 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4464 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4465 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4466 
   4467 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4468 }
   4469 
   4470 /*
 * Adjust the number of TX and RX queues the system actually uses.
 *
 * The number is limited by the following parameters:
 *     - the number of hardware queues
 *     - the number of MSI-X vectors (= "nvectors" argument)
 *     - ncpu
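 *
 * For example, with 4 MSI-X vectors on an 82576 (16 hardware queues)
 * and 8 CPUs, one vector is reserved for the link interrupt, so
 * sc_nqueues becomes 3.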
   4477  */
   4478 static void
   4479 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4480 {
   4481 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4482 
   4483 	if (nvectors < 2) {
   4484 		sc->sc_nqueues = 1;
   4485 		return;
   4486 	}
   4487 
	switch (sc->sc_type) {
   4489 	case WM_T_82572:
   4490 		hw_ntxqueues = 2;
   4491 		hw_nrxqueues = 2;
   4492 		break;
   4493 	case WM_T_82574:
   4494 		hw_ntxqueues = 2;
   4495 		hw_nrxqueues = 2;
   4496 		break;
   4497 	case WM_T_82575:
   4498 		hw_ntxqueues = 4;
   4499 		hw_nrxqueues = 4;
   4500 		break;
   4501 	case WM_T_82576:
   4502 		hw_ntxqueues = 16;
   4503 		hw_nrxqueues = 16;
   4504 		break;
   4505 	case WM_T_82580:
   4506 	case WM_T_I350:
   4507 	case WM_T_I354:
   4508 		hw_ntxqueues = 8;
   4509 		hw_nrxqueues = 8;
   4510 		break;
   4511 	case WM_T_I210:
   4512 		hw_ntxqueues = 4;
   4513 		hw_nrxqueues = 4;
   4514 		break;
   4515 	case WM_T_I211:
   4516 		hw_ntxqueues = 2;
   4517 		hw_nrxqueues = 2;
   4518 		break;
   4519 		/*
		 * Since the Ethernet controllers below do not support
		 * MSI-X, this driver does not use multiqueue for them:
   4522 		 *     - WM_T_80003
   4523 		 *     - WM_T_ICH8
   4524 		 *     - WM_T_ICH9
   4525 		 *     - WM_T_ICH10
   4526 		 *     - WM_T_PCH
   4527 		 *     - WM_T_PCH2
   4528 		 *     - WM_T_PCH_LPT
   4529 		 */
   4530 	default:
   4531 		hw_ntxqueues = 1;
   4532 		hw_nrxqueues = 1;
   4533 		break;
   4534 	}
   4535 
   4536 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4537 
   4538 	/*
	 * Since more queues than MSI-X vectors cannot improve scaling,
	 * limit the number of queues actually used.
   4541 	 */
	if (nvectors < hw_nqueues + 1)
		sc->sc_nqueues = nvectors - 1;
	else
		sc->sc_nqueues = hw_nqueues;
   4547 
   4548 	/*
	 * Likewise, more queues than CPUs cannot improve scaling, so
	 * limit the number of queues to ncpu.
   4551 	 */
   4552 	if (ncpu < sc->sc_nqueues)
   4553 		sc->sc_nqueues = ncpu;
   4554 }
   4555 
   4556 /*
 * Both single-vector MSI and INTx can use this function.
   4558  */
   4559 static int
   4560 wm_setup_legacy(struct wm_softc *sc)
   4561 {
   4562 	pci_chipset_tag_t pc = sc->sc_pc;
   4563 	const char *intrstr = NULL;
   4564 	char intrbuf[PCI_INTRSTR_LEN];
   4565 	int error;
   4566 
   4567 	error = wm_alloc_txrx_queues(sc);
   4568 	if (error) {
   4569 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4570 		    error);
   4571 		return ENOMEM;
   4572 	}
   4573 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4574 	    sizeof(intrbuf));
   4575 #ifdef WM_MPSAFE
   4576 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4577 #endif
   4578 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4579 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4580 	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4582 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4583 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4584 		return ENOMEM;
   4585 	}
   4586 
   4587 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4588 	sc->sc_nintrs = 1;
   4589 	return 0;
   4590 }
   4591 
   4592 static int
   4593 wm_setup_msix(struct wm_softc *sc)
   4594 {
   4595 	void *vih;
   4596 	kcpuset_t *affinity;
   4597 	int qidx, error, intr_idx, txrx_established;
   4598 	pci_chipset_tag_t pc = sc->sc_pc;
   4599 	const char *intrstr = NULL;
   4600 	char intrbuf[PCI_INTRSTR_LEN];
   4601 	char intr_xname[INTRDEVNAMEBUF];
   4602 
   4603 	if (sc->sc_nqueues < ncpu) {
   4604 		/*
		 * To avoid colliding with other devices' interrupts,
		 * start the Tx/Rx interrupt affinity at CPU#1.
   4607 		 */
   4608 		sc->sc_affinity_offset = 1;
   4609 	} else {
   4610 		/*
   4611 		 * In this case the device uses all CPUs, so align each
   4612 		 * affinitized cpu_index with its MSI-X vector number.
   4613 		 */
   4614 		sc->sc_affinity_offset = 0;
   4615 	}
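
        	/*
        	 * With the offset chosen above, the TX/RX loop below spreads
        	 * queue interrupts round-robin: e.g. on a 4-CPU machine with
        	 * 3 queues, sc_affinity_offset is 1 and TXRX0..TXRX2 land on
        	 * CPU#1..CPU#3, leaving CPU#0 for other devices.
        	 */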
   4616 
   4617 	error = wm_alloc_txrx_queues(sc);
   4618 	if (error) {
   4619 		aprint_error_dev(sc->sc_dev,
   4620 		    "unable to allocate queues, error = %d\n", error);
   4621 		return ENOMEM;
   4622 	}
   4623 
   4624 	kcpuset_create(&affinity, false);
   4625 	intr_idx = 0;
   4626 
   4627 	/*
   4628 	 * TX and RX
   4629 	 */
   4630 	txrx_established = 0;
   4631 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4632 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4633 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4634 
   4635 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4636 		    sizeof(intrbuf));
   4637 #ifdef WM_MPSAFE
   4638 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4639 		    PCI_INTR_MPSAFE, true);
   4640 #endif
   4641 		memset(intr_xname, 0, sizeof(intr_xname));
   4642 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4643 		    device_xname(sc->sc_dev), qidx);
   4644 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4645 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4646 		if (vih == NULL) {
   4647 			aprint_error_dev(sc->sc_dev,
   4648 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4649 			    intrstr ? " at " : "",
   4650 			    intrstr ? intrstr : "");
   4651 
   4652 			goto fail;
   4653 		}
   4654 		kcpuset_zero(affinity);
   4655 		/* Round-robin affinity */
   4656 		kcpuset_set(affinity, affinity_to);
   4657 		error = interrupt_distribute(vih, affinity, NULL);
   4658 		if (error == 0) {
   4659 			aprint_normal_dev(sc->sc_dev,
   4660 			    "for TX and RX interrupting at %s affinity to %u\n",
   4661 			    intrstr, affinity_to);
   4662 		} else {
   4663 			aprint_normal_dev(sc->sc_dev,
   4664 			    "for TX and RX interrupting at %s\n", intrstr);
   4665 		}
   4666 		sc->sc_ihs[intr_idx] = vih;
   4667 		wmq->wmq_id = qidx;
   4668 		wmq->wmq_intr_idx = intr_idx;
   4669 
   4670 		txrx_established++;
   4671 		intr_idx++;
   4672 	}
   4673 
   4674 	/*
   4675 	 * LINK
   4676 	 */
   4677 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4678 	    sizeof(intrbuf));
   4679 #ifdef WM_MPSAFE
   4680 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4681 #endif
   4682 	memset(intr_xname, 0, sizeof(intr_xname));
   4683 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4684 	    device_xname(sc->sc_dev));
   4685 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4686 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4687 	if (vih == NULL) {
   4688 		aprint_error_dev(sc->sc_dev,
   4689 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4690 		    intrstr ? " at " : "",
   4691 		    intrstr ? intrstr : "");
   4692 
   4693 		goto fail;
   4694 	}
   4695 	/* Keep the default affinity for the LINK interrupt */
   4696 	aprint_normal_dev(sc->sc_dev,
   4697 	    "for LINK interrupting at %s\n", intrstr);
   4698 	sc->sc_ihs[intr_idx] = vih;
   4699 	sc->sc_link_intr_idx = intr_idx;
   4700 
   4701 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4702 	kcpuset_destroy(affinity);
   4703 	return 0;
   4704 
   4705  fail:
   4706 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4707 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4708 		pci_intr_disestablish(sc->sc_pc,
        		    sc->sc_ihs[wmq->wmq_intr_idx]);
   4709 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4710 	}
   4711 
   4712 	kcpuset_destroy(affinity);
   4713 	return ENOMEM;
   4714 }
   4715 
   4716 static void
   4717 wm_turnon(struct wm_softc *sc)
   4718 {
   4719 	int i;
   4720 
   4721 	KASSERT(WM_CORE_LOCKED(sc));
   4722 
   4723 	for (i = 0; i < sc->sc_nqueues; i++) {
   4724 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4725 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4726 
   4727 		mutex_enter(txq->txq_lock);
   4728 		txq->txq_stopping = false;
   4729 		mutex_exit(txq->txq_lock);
   4730 
   4731 		mutex_enter(rxq->rxq_lock);
   4732 		rxq->rxq_stopping = false;
   4733 		mutex_exit(rxq->rxq_lock);
   4734 	}
   4735 
   4736 	sc->sc_core_stopping = false;
   4737 }
   4738 
   4739 static void
   4740 wm_turnoff(struct wm_softc *sc)
   4741 {
   4742 	int i;
   4743 
   4744 	KASSERT(WM_CORE_LOCKED(sc));
   4745 
   4746 	sc->sc_core_stopping = true;
   4747 
   4748 	for (i = 0; i < sc->sc_nqueues; i++) {
   4749 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4750 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4751 
   4752 		mutex_enter(rxq->rxq_lock);
   4753 		rxq->rxq_stopping = true;
   4754 		mutex_exit(rxq->rxq_lock);
   4755 
   4756 		mutex_enter(txq->txq_lock);
   4757 		txq->txq_stopping = true;
   4758 		mutex_exit(txq->txq_lock);
   4759 	}
   4760 }
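
        /*
         * Note the symmetry above: wm_turnoff() raises sc_core_stopping before
         * the per-queue stopping flags, while wm_turnon() clears the per-queue
         * flags first and sc_core_stopping last.
         */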
   4761 
   4762 /*
   4763  * wm_init:		[ifnet interface function]
   4764  *
   4765  *	Initialize the interface.
   4766  */
   4767 static int
   4768 wm_init(struct ifnet *ifp)
   4769 {
   4770 	struct wm_softc *sc = ifp->if_softc;
   4771 	int ret;
   4772 
   4773 	WM_CORE_LOCK(sc);
   4774 	ret = wm_init_locked(ifp);
   4775 	WM_CORE_UNLOCK(sc);
   4776 
   4777 	return ret;
   4778 }
   4779 
   4780 static int
   4781 wm_init_locked(struct ifnet *ifp)
   4782 {
   4783 	struct wm_softc *sc = ifp->if_softc;
   4784 	int i, j, trynum, error = 0;
   4785 	uint32_t reg;
   4786 
   4787 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4788 		device_xname(sc->sc_dev), __func__));
   4789 	KASSERT(WM_CORE_LOCKED(sc));
   4790 
   4791 	/*
   4792 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
   4793 	 * There is a small but measurable benefit to avoiding the adjustment
   4794 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4795 	 * on such platforms.  One possibility is that the DMA itself is
   4796 	 * slightly more efficient if the front of the entire packet (instead
   4797 	 * of the front of the headers) is aligned.
   4798 	 *
   4799 	 * Note we must always set align_tweak to 0 if we are using
   4800 	 * jumbo frames.
   4801 	 */
   4802 #ifdef __NO_STRICT_ALIGNMENT
   4803 	sc->sc_align_tweak = 0;
   4804 #else
   4805 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4806 		sc->sc_align_tweak = 0;
   4807 	else
   4808 		sc->sc_align_tweak = 2;
   4809 #endif /* __NO_STRICT_ALIGNMENT */
   4810 
   4811 	/* Cancel any pending I/O. */
   4812 	wm_stop_locked(ifp, 0);
   4813 
   4814 	/* update statistics before reset */
   4815 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4816 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4817 
   4818 	/* PCH_SPT hardware workaround */
   4819 	if (sc->sc_type == WM_T_PCH_SPT)
   4820 		wm_flush_desc_rings(sc);
   4821 
   4822 	/* Reset the chip to a known state. */
   4823 	wm_reset(sc);
   4824 
   4825 	/* AMT based hardware can now take control from firmware */
   4826 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4827 		wm_get_hw_control(sc);
   4828 
   4829 	/* Init hardware bits */
   4830 	wm_initialize_hardware_bits(sc);
   4831 
   4832 	/* Reset the PHY. */
   4833 	if (sc->sc_flags & WM_F_HAS_MII)
   4834 		wm_gmii_reset(sc);
   4835 
   4836 	/* Calculate (E)ITR value */
   4837 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4838 		sc->sc_itr = 450;	/* For EITR */
   4839 	} else if (sc->sc_type >= WM_T_82543) {
   4840 		/*
   4841 		 * Set up the interrupt throttling register (units of 256ns)
   4842 		 * Note that a footnote in Intel's documentation says this
   4843 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4844 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4845 		 * that that is also true for the 1024ns units of the other
   4846 		 * interrupt-related timer registers -- so, really, we ought
   4847 		 * to divide this value by 4 when the link speed is low.
   4848 		 *
   4849 		 * XXX implement this division at link speed change!
   4850 		 */
   4851 
   4852 		/*
   4853 		 * For N interrupts/sec, set this value to:
   4854 		 * 1000000000 / (N * 256).  Note that we set the
   4855 		 * absolute and packet timer values to this value
   4856 		 * divided by 4 to get "simple timer" behavior.
   4857 		 */
   4858 
   4859 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4860 	}
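
        	/*
        	 * Worked example for the legacy ITR value above: 1500 ticks
        	 * of 256ns give a 384us minimum gap between interrupts, i.e.
        	 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, matching
        	 * the comment.
        	 */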
   4861 
   4862 	error = wm_init_txrx_queues(sc);
   4863 	if (error)
   4864 		goto out;
   4865 
   4866 	/*
   4867 	 * Clear out the VLAN table -- we don't use it (yet).
   4868 	 */
   4869 	CSR_WRITE(sc, WMREG_VET, 0);
   4870 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4871 		trynum = 10; /* Due to hw errata */
   4872 	else
   4873 		trynum = 1;
   4874 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4875 		for (j = 0; j < trynum; j++)
   4876 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4877 
   4878 	/*
   4879 	 * Set up flow-control parameters.
   4880 	 *
   4881 	 * XXX Values could probably stand some tuning.
   4882 	 */
   4883 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4884 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4885 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4886 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4887 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4888 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4889 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4890 	}
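
        	/*
        	 * FCAL/FCAH hold the fixed 802.3x PAUSE destination address
        	 * (01:80:c2:00:00:01) and FCT the flow-control ethertype
        	 * (ETHERTYPE_FLOWCONTROL, 0x8808), which the constants above
        	 * are assumed to encode.
        	 */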
   4891 
   4892 	sc->sc_fcrtl = FCRTL_DFLT;
   4893 	if (sc->sc_type < WM_T_82543) {
   4894 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4895 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4896 	} else {
   4897 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4898 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4899 	}
   4900 
   4901 	if (sc->sc_type == WM_T_80003)
   4902 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4903 	else
   4904 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4905 
   4906 	/* Writes the control register. */
   4907 	wm_set_vlan(sc);
   4908 
   4909 	if (sc->sc_flags & WM_F_HAS_MII) {
   4910 		int val;
   4911 
   4912 		switch (sc->sc_type) {
   4913 		case WM_T_80003:
   4914 		case WM_T_ICH8:
   4915 		case WM_T_ICH9:
   4916 		case WM_T_ICH10:
   4917 		case WM_T_PCH:
   4918 		case WM_T_PCH2:
   4919 		case WM_T_PCH_LPT:
   4920 		case WM_T_PCH_SPT:
   4921 			/*
   4922 			 * Set the mac to wait the maximum time between each
   4923 			 * iteration and increase the max iterations when
   4924 			 * polling the phy; this fixes erroneous timeouts at
   4925 			 * 10Mbps.
   4926 			 */
   4927 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4928 			    0xFFFF);
   4929 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4930 			val |= 0x3F;
   4931 			wm_kmrn_writereg(sc,
   4932 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4933 			break;
   4934 		default:
   4935 			break;
   4936 		}
   4937 
   4938 		if (sc->sc_type == WM_T_80003) {
   4939 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4940 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4941 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4942 
   4943 			/* Bypass RX and TX FIFO's */
   4944 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4945 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4946 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4947 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4948 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4949 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4950 		}
   4951 	}
   4952 #if 0
   4953 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4954 #endif
   4955 
   4956 	/* Set up checksum offload parameters. */
   4957 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4958 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4959 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4960 		reg |= RXCSUM_IPOFL;
   4961 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4962 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4963 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4964 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4965 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4966 
   4967 	/* Set up MSI-X */
   4968 	if (sc->sc_nintrs > 1) {
   4969 		uint32_t ivar;
   4970 		struct wm_queue *wmq;
   4971 		int qid, qintr_idx;
   4972 
   4973 		if (sc->sc_type == WM_T_82575) {
   4974 			/* Interrupt control */
   4975 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4976 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4977 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4978 
   4979 			/* TX and RX */
   4980 			for (i = 0; i < sc->sc_nqueues; i++) {
   4981 				wmq = &sc->sc_queue[i];
   4982 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4983 				    EITR_TX_QUEUE(wmq->wmq_id)
   4984 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4985 			}
   4986 			/* Link status */
   4987 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4988 			    EITR_OTHER);
   4989 		} else if (sc->sc_type == WM_T_82574) {
   4990 			/* Interrupt control */
   4991 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4992 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4993 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4994 
   4995 			ivar = 0;
   4996 			/* TX and RX */
   4997 			for (i = 0; i < sc->sc_nqueues; i++) {
   4998 				wmq = &sc->sc_queue[i];
   4999 				qid = wmq->wmq_id;
   5000 				qintr_idx = wmq->wmq_intr_idx;
   5001 
   5002 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5003 				    IVAR_TX_MASK_Q_82574(qid));
   5004 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5005 				    IVAR_RX_MASK_Q_82574(qid));
   5006 			}
   5007 			/* Link status */
   5008 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5009 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5010 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5011 		} else {
   5012 			/* Interrupt control */
   5013 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5014 			    | GPIE_EIAME | GPIE_PBA);
   5015 
   5016 			switch (sc->sc_type) {
   5017 			case WM_T_82580:
   5018 			case WM_T_I350:
   5019 			case WM_T_I354:
   5020 			case WM_T_I210:
   5021 			case WM_T_I211:
   5022 				/* TX and RX */
   5023 				for (i = 0; i < sc->sc_nqueues; i++) {
   5024 					wmq = &sc->sc_queue[i];
   5025 					qid = wmq->wmq_id;
   5026 					qintr_idx = wmq->wmq_intr_idx;
   5027 
   5028 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5029 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5030 					ivar |= __SHIFTIN((qintr_idx
   5031 						| IVAR_VALID),
   5032 					    IVAR_TX_MASK_Q(qid));
   5033 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5034 					ivar |= __SHIFTIN((qintr_idx
   5035 						| IVAR_VALID),
   5036 					    IVAR_RX_MASK_Q(qid));
   5037 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5038 				}
   5039 				break;
   5040 			case WM_T_82576:
   5041 				/* TX and RX */
   5042 				for (i = 0; i < sc->sc_nqueues; i++) {
   5043 					wmq = &sc->sc_queue[i];
   5044 					qid = wmq->wmq_id;
   5045 					qintr_idx = wmq->wmq_intr_idx;
   5046 
   5047 					ivar = CSR_READ(sc,
   5048 					    WMREG_IVAR_Q_82576(qid));
   5049 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5050 					ivar |= __SHIFTIN((qintr_idx
   5051 						| IVAR_VALID),
   5052 					    IVAR_TX_MASK_Q_82576(qid));
   5053 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5054 					ivar |= __SHIFTIN((qintr_idx
   5055 						| IVAR_VALID),
   5056 					    IVAR_RX_MASK_Q_82576(qid));
   5057 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5058 					    ivar);
   5059 				}
   5060 				break;
   5061 			default:
   5062 				break;
   5063 			}
   5064 
   5065 			/* Link status */
   5066 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5067 			    IVAR_MISC_OTHER);
   5068 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5069 		}
   5070 
   5071 		if (sc->sc_nqueues > 1) {
   5072 			wm_init_rss(sc);
   5073 
   5074 			/*
   5075 			 * NOTE: Receive Full-Packet Checksum Offload is
   5076 			 * mutually exclusive with Multiqueue. However,
   5077 			 * this is not the same as TCP/IP checksums, which
   5078 			 * still work.
   5079 			 */
   5080 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5081 			reg |= RXCSUM_PCSD;
   5082 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5083 		}
   5084 	}
   5085 
   5086 	/* Set up the interrupt registers. */
   5087 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5088 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5089 	    ICR_RXO | ICR_RXT0;
   5090 	if (sc->sc_nintrs > 1) {
   5091 		uint32_t mask;
   5092 		struct wm_queue *wmq;
   5093 
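        		/*
        		 * Roughly: EIAC selects which extended interrupt causes
        		 * auto-clear, EIAM which ones auto-mask, and EIMS
        		 * enables them; IMS then only needs ICR_LSC, as link
        		 * changes are the one cause still delivered via the
        		 * legacy ICR path here.
        		 */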
   5094 		switch (sc->sc_type) {
   5095 		case WM_T_82574:
   5096 			CSR_WRITE(sc, WMREG_EIAC_82574,
   5097 			    WMREG_EIAC_82574_MSIX_MASK);
   5098 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   5099 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5100 			break;
   5101 		default:
   5102 			if (sc->sc_type == WM_T_82575) {
   5103 				mask = 0;
   5104 				for (i = 0; i < sc->sc_nqueues; i++) {
   5105 					wmq = &sc->sc_queue[i];
   5106 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5107 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5108 				}
   5109 				mask |= EITR_OTHER;
   5110 			} else {
   5111 				mask = 0;
   5112 				for (i = 0; i < sc->sc_nqueues; i++) {
   5113 					wmq = &sc->sc_queue[i];
   5114 					mask |= 1 << wmq->wmq_intr_idx;
   5115 				}
   5116 				mask |= 1 << sc->sc_link_intr_idx;
   5117 			}
   5118 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5119 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5120 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5121 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5122 			break;
   5123 		}
   5124 	} else
   5125 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5126 
   5127 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5128 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5129 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5130 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5131 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5132 		reg |= KABGTXD_BGSQLBIAS;
   5133 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5134 	}
   5135 
   5136 	/* Set up the inter-packet gap. */
   5137 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5138 
   5139 	if (sc->sc_type >= WM_T_82543) {
   5140 		/*
   5141 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
   5142 		 * the multi queue function with MSI-X.
   5143 		 */
   5144 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5145 			int qidx;
   5146 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5147 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5148 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5149 				    sc->sc_itr);
   5150 			}
   5151 			/*
   5152 			 * Link interrupts occur much less frequently than
   5153 			 * TX and RX interrupts, so we don't tune the
   5154 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
   5155 			 * FreeBSD's if_igb does.
   5156 			 */
   5157 		} else
   5158 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5159 	}
   5160 
   5161 	/* Set the VLAN ethernetype. */
   5162 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5163 
   5164 	/*
   5165 	 * Set up the transmit control register; we start out with
   5166 	 * a collision distance suitable for FDX, but update it when
   5167 	 * we resolve the media type.
   5168 	 */
   5169 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5170 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5171 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5172 	if (sc->sc_type >= WM_T_82571)
   5173 		sc->sc_tctl |= TCTL_MULR;
   5174 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5175 
   5176 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5177 		/* Write TDT after TCTL.EN is set. See the document. */
   5178 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5179 	}
   5180 
   5181 	if (sc->sc_type == WM_T_80003) {
   5182 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5183 		reg &= ~TCTL_EXT_GCEX_MASK;
   5184 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5185 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5186 	}
   5187 
   5188 	/* Set the media. */
   5189 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5190 		goto out;
   5191 
   5192 	/* Configure for OS presence */
   5193 	wm_init_manageability(sc);
   5194 
   5195 	/*
   5196 	 * Set up the receive control register; we actually program
   5197 	 * the register when we set the receive filter.  Use multicast
   5198 	 * address offset type 0.
   5199 	 *
   5200 	 * Only the i82544 has the ability to strip the incoming
   5201 	 * CRC, so we don't enable that feature.
   5202 	 */
   5203 	sc->sc_mchash_type = 0;
   5204 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5205 	    | RCTL_MO(sc->sc_mchash_type);
   5206 
   5207 	/*
   5208 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5209 	 */
   5210 	if (sc->sc_type == WM_T_82574)
   5211 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5212 
   5213 	/*
   5214 	 * The I350 has a bug where it always strips the CRC whether
   5215 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   5216 	 */
   5217 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5218 	    || (sc->sc_type == WM_T_I210))
   5219 		sc->sc_rctl |= RCTL_SECRC;
   5220 
   5221 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5222 	    && (ifp->if_mtu > ETHERMTU)) {
   5223 		sc->sc_rctl |= RCTL_LPE;
   5224 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5225 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5226 	}
   5227 
   5228 	if (MCLBYTES == 2048) {
   5229 		sc->sc_rctl |= RCTL_2k;
   5230 	} else {
   5231 		if (sc->sc_type >= WM_T_82543) {
   5232 			switch (MCLBYTES) {
   5233 			case 4096:
   5234 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5235 				break;
   5236 			case 8192:
   5237 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5238 				break;
   5239 			case 16384:
   5240 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5241 				break;
   5242 			default:
   5243 				panic("wm_init: MCLBYTES %d unsupported",
   5244 				    MCLBYTES);
   5245 				break;
   5246 			}
   5247 		} else
        			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5248 	}
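
        	/*
        	 * On most NetBSD ports MCLBYTES is 2048, so the RCTL_2k case
        	 * above is the common one; the BSEX settings only matter on
        	 * ports with larger mbuf clusters.
        	 */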
   5249 
   5250 	/* Set the receive filter. */
   5251 	wm_set_filter(sc);
   5252 
   5253 	/* Enable ECC */
   5254 	switch (sc->sc_type) {
   5255 	case WM_T_82571:
   5256 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5257 		reg |= PBA_ECC_CORR_EN;
   5258 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5259 		break;
   5260 	case WM_T_PCH_LPT:
   5261 	case WM_T_PCH_SPT:
   5262 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5263 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5264 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5265 
   5266 		sc->sc_ctrl |= CTRL_MEHE;
   5267 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5268 		break;
   5269 	default:
   5270 		break;
   5271 	}
   5272 
   5273 	/* On 575 and later set RDT only if RX enabled */
   5274 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5275 		int qidx;
   5276 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5277 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5278 			for (i = 0; i < WM_NRXDESC; i++) {
   5279 				mutex_enter(rxq->rxq_lock);
   5280 				wm_init_rxdesc(rxq, i);
   5281 				mutex_exit(rxq->rxq_lock);
   5282 			}
   5284 		}
   5285 	}
   5286 
   5287 	wm_turnon(sc);
   5288 
   5289 	/* Start the one second link check clock. */
   5290 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5291 
   5292 	/* ...all done! */
   5293 	ifp->if_flags |= IFF_RUNNING;
   5294 	ifp->if_flags &= ~IFF_OACTIVE;
   5295 
   5296  out:
   5297 	sc->sc_if_flags = ifp->if_flags;
   5298 	if (error)
   5299 		log(LOG_ERR, "%s: interface not running\n",
   5300 		    device_xname(sc->sc_dev));
   5301 	return error;
   5302 }
   5303 
   5304 /*
   5305  * wm_stop:		[ifnet interface function]
   5306  *
   5307  *	Stop transmission on the interface.
   5308  */
   5309 static void
   5310 wm_stop(struct ifnet *ifp, int disable)
   5311 {
   5312 	struct wm_softc *sc = ifp->if_softc;
   5313 
   5314 	WM_CORE_LOCK(sc);
   5315 	wm_stop_locked(ifp, disable);
   5316 	WM_CORE_UNLOCK(sc);
   5317 }
   5318 
   5319 static void
   5320 wm_stop_locked(struct ifnet *ifp, int disable)
   5321 {
   5322 	struct wm_softc *sc = ifp->if_softc;
   5323 	struct wm_txsoft *txs;
   5324 	int i, qidx;
   5325 
   5326 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5327 		device_xname(sc->sc_dev), __func__));
   5328 	KASSERT(WM_CORE_LOCKED(sc));
   5329 
   5330 	wm_turnoff(sc);
   5331 
   5332 	/* Stop the one second clock. */
   5333 	callout_stop(&sc->sc_tick_ch);
   5334 
   5335 	/* Stop the 82547 Tx FIFO stall check timer. */
   5336 	if (sc->sc_type == WM_T_82547)
   5337 		callout_stop(&sc->sc_txfifo_ch);
   5338 
   5339 	if (sc->sc_flags & WM_F_HAS_MII) {
   5340 		/* Down the MII. */
   5341 		mii_down(&sc->sc_mii);
   5342 	} else {
   5343 #if 0
   5344 		/* Should we clear PHY's status properly? */
   5345 		wm_reset(sc);
   5346 #endif
   5347 	}
   5348 
   5349 	/* Stop the transmit and receive processes. */
   5350 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5351 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5352 	sc->sc_rctl &= ~RCTL_EN;
   5353 
   5354 	/*
   5355 	 * Clear the interrupt mask to ensure the device cannot assert its
   5356 	 * interrupt line.
   5357 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5358 	 * service any currently pending or shared interrupt.
   5359 	 */
   5360 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5361 	sc->sc_icr = 0;
   5362 	if (sc->sc_nintrs > 1) {
   5363 		if (sc->sc_type != WM_T_82574) {
   5364 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5365 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5366 		} else
   5367 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5368 	}
   5369 
   5370 	/* Release any queued transmit buffers. */
   5371 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5372 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5373 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5374 		mutex_enter(txq->txq_lock);
   5375 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5376 			txs = &txq->txq_soft[i];
   5377 			if (txs->txs_mbuf != NULL) {
   5378 				bus_dmamap_unload(sc->sc_dmat,
        				    txs->txs_dmamap);
   5379 				m_freem(txs->txs_mbuf);
   5380 				txs->txs_mbuf = NULL;
   5381 			}
   5382 		}
   5383 		mutex_exit(txq->txq_lock);
   5384 	}
   5385 
   5386 	/* Mark the interface as down and cancel the watchdog timer. */
   5387 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5388 	ifp->if_timer = 0;
   5389 
   5390 	if (disable) {
   5391 		for (i = 0; i < sc->sc_nqueues; i++) {
   5392 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5393 			mutex_enter(rxq->rxq_lock);
   5394 			wm_rxdrain(rxq);
   5395 			mutex_exit(rxq->rxq_lock);
   5396 		}
   5397 	}
   5398 
   5399 #if 0 /* notyet */
   5400 	if (sc->sc_type >= WM_T_82544)
   5401 		CSR_WRITE(sc, WMREG_WUC, 0);
   5402 #endif
   5403 }
   5404 
   5405 static void
   5406 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5407 {
   5408 	struct mbuf *m;
   5409 	int i;
   5410 
   5411 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5412 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5413 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5414 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5415 		    m->m_data, m->m_len, m->m_flags);
   5416 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5417 	    i, i == 1 ? "" : "s");
   5418 }
   5419 
   5420 /*
   5421  * wm_82547_txfifo_stall:
   5422  *
   5423  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5424  *	reset the FIFO pointers, and restart packet transmission.
   5425  */
   5426 static void
   5427 wm_82547_txfifo_stall(void *arg)
   5428 {
   5429 	struct wm_softc *sc = arg;
   5430 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5431 
   5432 	mutex_enter(txq->txq_lock);
   5433 
   5434 	if (txq->txq_stopping)
   5435 		goto out;
   5436 
   5437 	if (txq->txq_fifo_stall) {
   5438 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5439 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5440 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5441 			/*
   5442 			 * Packets have drained.  Stop transmitter, reset
   5443 			 * FIFO pointers, restart transmitter, and kick
   5444 			 * the packet queue.
   5445 			 */
   5446 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5447 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5448 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5449 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5450 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5451 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5452 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5453 			CSR_WRITE_FLUSH(sc);
   5454 
   5455 			txq->txq_fifo_head = 0;
   5456 			txq->txq_fifo_stall = 0;
   5457 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5458 		} else {
   5459 			/*
   5460 			 * Still waiting for packets to drain; try again in
   5461 			 * another tick.
   5462 			 */
   5463 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5464 		}
   5465 	}
   5466 
   5467 out:
   5468 	mutex_exit(txq->txq_lock);
   5469 }
   5470 
   5471 /*
   5472  * wm_82547_txfifo_bugchk:
   5473  *
   5474  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5475  *	prevent enqueueing a packet that would wrap around the end
   5476  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5477  *
   5478  *	We do this by checking the amount of space before the end
   5479  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5480  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5481  *	the internal FIFO pointers to the beginning, and restart
   5482  *	transmission on the interface.
   5483  */
   5484 #define	WM_FIFO_HDR		0x10
   5485 #define	WM_82547_PAD_LEN	0x3e0
   5486 static int
   5487 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5488 {
   5489 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5490 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5491 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
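
        	/*
        	 * Example of the check below: a 1514-byte frame rounds up to
        	 * len = 0x600 (1514 + WM_FIFO_HDR, rounded to a WM_FIFO_HDR
        	 * boundary), so it stalls whenever no more than
        	 * 0x600 - WM_82547_PAD_LEN = 0x220 bytes remain before the
        	 * end of the FIFO.
        	 */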
   5492 
   5493 	/* Just return if already stalled. */
   5494 	if (txq->txq_fifo_stall)
   5495 		return 1;
   5496 
   5497 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5498 		/* Stall only occurs in half-duplex mode. */
   5499 		goto send_packet;
   5500 	}
   5501 
   5502 	if (len >= WM_82547_PAD_LEN + space) {
   5503 		txq->txq_fifo_stall = 1;
   5504 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5505 		return 1;
   5506 	}
   5507 
   5508  send_packet:
   5509 	txq->txq_fifo_head += len;
   5510 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5511 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5512 
   5513 	return 0;
   5514 }
   5515 
   5516 static int
   5517 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5518 {
   5519 	int error;
   5520 
   5521 	/*
   5522 	 * Allocate the control data structures, and create and load the
   5523 	 * DMA map for it.
   5524 	 *
   5525 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5526 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5527 	 * both sets within the same 4G segment.
   5528 	 */
   5529 	if (sc->sc_type < WM_T_82544)
   5530 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5531 	else
   5532 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5533 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5534 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5535 	else
   5536 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
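
        	/*
        	 * The 0x100000000ULL boundary argument to bus_dmamem_alloc()
        	 * below is what enforces the NOTE above: the allocation must
        	 * not cross a 4G address boundary.
        	 */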
   5537 
   5538 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5539 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5540 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5541 		aprint_error_dev(sc->sc_dev,
   5542 		    "unable to allocate TX control data, error = %d\n",
   5543 		    error);
   5544 		goto fail_0;
   5545 	}
   5546 
   5547 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5548 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5549 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5550 		aprint_error_dev(sc->sc_dev,
   5551 		    "unable to map TX control data, error = %d\n", error);
   5552 		goto fail_1;
   5553 	}
   5554 
   5555 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5556 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5557 		aprint_error_dev(sc->sc_dev,
   5558 		    "unable to create TX control data DMA map, error = %d\n",
   5559 		    error);
   5560 		goto fail_2;
   5561 	}
   5562 
   5563 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5564 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5565 		aprint_error_dev(sc->sc_dev,
   5566 		    "unable to load TX control data DMA map, error = %d\n",
   5567 		    error);
   5568 		goto fail_3;
   5569 	}
   5570 
   5571 	return 0;
   5572 
   5573  fail_3:
   5574 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5575  fail_2:
   5576 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5577 	    WM_TXDESCS_SIZE(txq));
   5578  fail_1:
   5579 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5580  fail_0:
   5581 	return error;
   5582 }
   5583 
   5584 static void
   5585 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5586 {
   5587 
   5588 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5589 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5590 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5591 	    WM_TXDESCS_SIZE(txq));
   5592 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5593 }
   5594 
   5595 static int
   5596 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5597 {
   5598 	int error;
   5599 	size_t rxq_descs_size;
   5600 
   5601 	/*
   5602 	 * Allocate the control data structures, and create and load the
   5603 	 * DMA map for it.
   5604 	 *
   5605 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5606 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5607 	 * both sets within the same 4G segment.
   5608 	 */
   5609 	rxq->rxq_ndesc = WM_NRXDESC;
   5610 	if (sc->sc_type == WM_T_82574)
   5611 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5612 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5613 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5614 	else
   5615 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5616 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5617 
   5618 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5619 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5620 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5621 		aprint_error_dev(sc->sc_dev,
   5622 		    "unable to allocate RX control data, error = %d\n",
   5623 		    error);
   5624 		goto fail_0;
   5625 	}
   5626 
   5627 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5628 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5629 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5630 		aprint_error_dev(sc->sc_dev,
   5631 		    "unable to map RX control data, error = %d\n", error);
   5632 		goto fail_1;
   5633 	}
   5634 
   5635 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5636 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5637 		aprint_error_dev(sc->sc_dev,
   5638 		    "unable to create RX control data DMA map, error = %d\n",
   5639 		    error);
   5640 		goto fail_2;
   5641 	}
   5642 
   5643 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5644 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5645 		aprint_error_dev(sc->sc_dev,
   5646 		    "unable to load RX control data DMA map, error = %d\n",
   5647 		    error);
   5648 		goto fail_3;
   5649 	}
   5650 
   5651 	return 0;
   5652 
   5653  fail_3:
   5654 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5655  fail_2:
   5656 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5657 	    rxq_descs_size);
   5658  fail_1:
   5659 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5660  fail_0:
   5661 	return error;
   5662 }
   5663 
   5664 static void
   5665 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5666 {
   5667 
   5668 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5669 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5670 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5671 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5672 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5673 }
   5674 
   5675 
   5676 static int
   5677 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5678 {
   5679 	int i, error;
   5680 
   5681 	/* Create the transmit buffer DMA maps. */
   5682 	WM_TXQUEUELEN(txq) =
   5683 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5684 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5685 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5686 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5687 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5688 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5689 			aprint_error_dev(sc->sc_dev,
   5690 			    "unable to create Tx DMA map %d, error = %d\n",
   5691 			    i, error);
   5692 			goto fail;
   5693 		}
   5694 	}
   5695 
   5696 	return 0;
   5697 
   5698  fail:
   5699 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5700 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5701 			bus_dmamap_destroy(sc->sc_dmat,
   5702 			    txq->txq_soft[i].txs_dmamap);
   5703 	}
   5704 	return error;
   5705 }
   5706 
   5707 static void
   5708 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5709 {
   5710 	int i;
   5711 
   5712 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5713 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5714 			bus_dmamap_destroy(sc->sc_dmat,
   5715 			    txq->txq_soft[i].txs_dmamap);
   5716 	}
   5717 }
   5718 
   5719 static int
   5720 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5721 {
   5722 	int i, error;
   5723 
   5724 	/* Create the receive buffer DMA maps. */
   5725 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5726 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5727 			    MCLBYTES, 0, 0,
   5728 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5729 			aprint_error_dev(sc->sc_dev,
   5730 			    "unable to create Rx DMA map %d, error = %d\n",
   5731 			    i, error);
   5732 			goto fail;
   5733 		}
   5734 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5735 	}
   5736 
   5737 	return 0;
   5738 
   5739  fail:
   5740 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5741 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5742 			bus_dmamap_destroy(sc->sc_dmat,
   5743 			    rxq->rxq_soft[i].rxs_dmamap);
   5744 	}
   5745 	return error;
   5746 }
   5747 
   5748 static void
   5749 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5750 {
   5751 	int i;
   5752 
   5753 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5754 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5755 			bus_dmamap_destroy(sc->sc_dmat,
   5756 			    rxq->rxq_soft[i].rxs_dmamap);
   5757 	}
   5758 }
   5759 
   5760 /*
   5761  * wm_alloc_txrx_queues:
   5762  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5763  */
   5764 static int
   5765 wm_alloc_txrx_queues(struct wm_softc *sc)
   5766 {
   5767 	int i, error, tx_done, rx_done;
   5768 
   5769 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5770 	    KM_SLEEP);
   5771 	if (sc->sc_queue == NULL) {
   5772 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5773 		error = ENOMEM;
   5774 		goto fail_0;
   5775 	}
   5776 
   5777 	/*
   5778 	 * For transmission
   5779 	 */
   5780 	error = 0;
   5781 	tx_done = 0;
   5782 	for (i = 0; i < sc->sc_nqueues; i++) {
   5783 #ifdef WM_EVENT_COUNTERS
   5784 		int j;
   5785 		const char *xname;
   5786 #endif
   5787 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5788 		txq->txq_sc = sc;
   5789 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5790 
   5791 		error = wm_alloc_tx_descs(sc, txq);
   5792 		if (error)
   5793 			break;
   5794 		error = wm_alloc_tx_buffer(sc, txq);
   5795 		if (error) {
   5796 			wm_free_tx_descs(sc, txq);
   5797 			break;
   5798 		}
   5799 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5800 		if (txq->txq_interq == NULL) {
   5801 			wm_free_tx_descs(sc, txq);
   5802 			wm_free_tx_buffer(sc, txq);
   5803 			error = ENOMEM;
   5804 			break;
   5805 		}
   5806 
   5807 #ifdef WM_EVENT_COUNTERS
   5808 		xname = device_xname(sc->sc_dev);
   5809 
   5810 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5811 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5812 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5813 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5814 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5815 
   5816 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5817 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5818 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5819 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5820 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5821 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5822 
   5823 		for (j = 0; j < WM_NTXSEGS; j++) {
   5824 			snprintf(txq->txq_txseg_evcnt_names[j],
   5825 			    sizeof(txq->txq_txseg_evcnt_names[j]),
   5826 			    "txq%02dtxseg%d", i, j);
   5827 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
        			    EVCNT_TYPE_MISC, NULL, xname,
        			    txq->txq_txseg_evcnt_names[j]);
   5828 		}
   5829 
   5830 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5831 
   5832 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5833 #endif /* WM_EVENT_COUNTERS */
   5834 
   5835 		tx_done++;
   5836 	}
   5837 	if (error)
   5838 		goto fail_1;
   5839 
   5840 	/*
   5841 	 * For receive
   5842 	 */
   5843 	error = 0;
   5844 	rx_done = 0;
   5845 	for (i = 0; i < sc->sc_nqueues; i++) {
   5846 #ifdef WM_EVENT_COUNTERS
   5847 		const char *xname;
   5848 #endif
   5849 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5850 		rxq->rxq_sc = sc;
   5851 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5852 
   5853 		error = wm_alloc_rx_descs(sc, rxq);
   5854 		if (error)
   5855 			break;
   5856 
   5857 		error = wm_alloc_rx_buffer(sc, rxq);
   5858 		if (error) {
   5859 			wm_free_rx_descs(sc, rxq);
   5860 			break;
   5861 		}
   5862 
   5863 #ifdef WM_EVENT_COUNTERS
   5864 		xname = device_xname(sc->sc_dev);
   5865 
   5866 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5867 
   5868 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5869 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5870 #endif /* WM_EVENT_COUNTERS */
   5871 
   5872 		rx_done++;
   5873 	}
   5874 	if (error)
   5875 		goto fail_2;
   5876 
   5877 	return 0;
   5878 
   5879  fail_2:
   5880 	for (i = 0; i < rx_done; i++) {
   5881 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5882 		wm_free_rx_buffer(sc, rxq);
   5883 		wm_free_rx_descs(sc, rxq);
   5884 		if (rxq->rxq_lock)
   5885 			mutex_obj_free(rxq->rxq_lock);
   5886 	}
   5887  fail_1:
   5888 	for (i = 0; i < tx_done; i++) {
   5889 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5890 		pcq_destroy(txq->txq_interq);
   5891 		wm_free_tx_buffer(sc, txq);
   5892 		wm_free_tx_descs(sc, txq);
   5893 		if (txq->txq_lock)
   5894 			mutex_obj_free(txq->txq_lock);
   5895 	}
   5896 
   5897 	kmem_free(sc->sc_queue,
   5898 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5899  fail_0:
   5900 	return error;
   5901 }
   5902 
   5903 /*
   5904  * wm_free_txrx_queues:
   5905  *	Free {tx,rx}descs and {tx,rx} buffers
   5906  */
   5907 static void
   5908 wm_free_txrx_queues(struct wm_softc *sc)
   5909 {
   5910 	int i;
   5911 
   5912 	for (i = 0; i < sc->sc_nqueues; i++) {
   5913 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5914 		wm_free_rx_buffer(sc, rxq);
   5915 		wm_free_rx_descs(sc, rxq);
   5916 		if (rxq->rxq_lock)
   5917 			mutex_obj_free(rxq->rxq_lock);
   5918 	}
   5919 
   5920 	for (i = 0; i < sc->sc_nqueues; i++) {
   5921 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5922 		struct mbuf *m;
   5923 
   5924 		/* drain txq_interq */
   5925 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   5926 			m_freem(m);
   5927 		pcq_destroy(txq->txq_interq);
   5928 
   5929 		wm_free_tx_buffer(sc, txq);
   5930 		wm_free_tx_descs(sc, txq);
   5931 		if (txq->txq_lock)
   5932 			mutex_obj_free(txq->txq_lock);
   5933 	}
   5934 
   5935 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5936 }
   5937 
   5938 static void
   5939 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5940 {
   5941 
   5942 	KASSERT(mutex_owned(txq->txq_lock));
   5943 
   5944 	/* Initialize the transmit descriptor ring. */
   5945 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5946 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5947 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5948 	txq->txq_free = WM_NTXDESC(txq);
   5949 	txq->txq_next = 0;
   5950 }
   5951 
   5952 static void
   5953 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5954     struct wm_txqueue *txq)
   5955 {
   5956 
   5957 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5958 		device_xname(sc->sc_dev), __func__));
   5959 	KASSERT(mutex_owned(txq->txq_lock));
   5960 
   5961 	if (sc->sc_type < WM_T_82543) {
   5962 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5963 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5964 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5965 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5966 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5967 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5968 	} else {
   5969 		int qid = wmq->wmq_id;
   5970 
   5971 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5972 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5973 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5974 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5975 
   5976 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5977 			/*
   5978 			 * Don't write TDT before TCTL.EN is set.
   5979 			 * See the document.
   5980 			 */
   5981 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5982 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5983 			    | TXDCTL_WTHRESH(0));
   5984 		else {
   5985 			/* ITR / 4 */
   5986 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5987 			if (sc->sc_type >= WM_T_82540) {
   5988 				/* Should be the same as TIDV */
   5989 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5990 			}
   5991 
   5992 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5993 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5994 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5995 		}
   5996 	}
   5997 }
   5998 
   5999 static void
   6000 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6001 {
   6002 	int i;
   6003 
   6004 	KASSERT(mutex_owned(txq->txq_lock));
   6005 
   6006 	/* Initialize the transmit job descriptors. */
   6007 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6008 		txq->txq_soft[i].txs_mbuf = NULL;
   6009 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6010 	txq->txq_snext = 0;
   6011 	txq->txq_sdirty = 0;
   6012 }
   6013 
   6014 static void
   6015 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6016     struct wm_txqueue *txq)
   6017 {
   6018 
   6019 	KASSERT(mutex_owned(txq->txq_lock));
   6020 
   6021 	/*
   6022 	 * Set up some register offsets that are different between
   6023 	 * the i82542 and the i82543 and later chips.
   6024 	 */
   6025 	if (sc->sc_type < WM_T_82543)
   6026 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6027 	else
   6028 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6029 
   6030 	wm_init_tx_descs(sc, txq);
   6031 	wm_init_tx_regs(sc, wmq, txq);
   6032 	wm_init_tx_buffer(sc, txq);
   6033 }
   6034 
   6035 static void
   6036 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6037     struct wm_rxqueue *rxq)
   6038 {
   6039 
   6040 	KASSERT(mutex_owned(rxq->rxq_lock));
   6041 
   6042 	/*
   6043 	 * Initialize the receive descriptor and receive job
   6044 	 * descriptor rings.
   6045 	 */
   6046 	if (sc->sc_type < WM_T_82543) {
   6047 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6048 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6049 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6050 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6051 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6052 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6053 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6054 
   6055 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6056 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6057 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6058 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6059 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6060 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6061 	} else {
   6062 		int qid = wmq->wmq_id;
   6063 
   6064 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6065 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6066 		CSR_WRITE(sc, WMREG_RDLEN(qid),
        		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6067 
   6068 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
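        			/*
        			 * SRRCTL's packet buffer size field is in units
        			 * of 1 << SRRCTL_BSIZEPKT_SHIFT bytes (1KB if
        			 * that shift is 10), so MCLBYTES must be a
        			 * multiple of it; e.g. MCLBYTES == 2048 would
        			 * program a field value of 2.
        			 */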
   6069 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6070 				panic("%s: MCLBYTES %d unsupported for "
        				    "i82575 or higher\n", __func__, MCLBYTES);
   6071 
   6072 			/*
        			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
        			 * supported.
        			 */
   6073 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
   6074 			    SRRCTL_DESCTYPE_ADV_ONEBUF
        			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6075 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6076 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6077 			    | RXDCTL_WTHRESH(1));
   6078 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6079 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6080 		} else {
   6081 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6082 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6083 			/* ITR / 4 */
   6084 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   6085 			/* MUST be the same as RDTR */
   6086 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   6087 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6088 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6089 		}
   6090 	}
   6091 }
   6092 
   6093 static int
   6094 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6095 {
   6096 	struct wm_rxsoft *rxs;
   6097 	int error, i;
   6098 
   6099 	KASSERT(mutex_owned(rxq->rxq_lock));
   6100 
   6101 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6102 		rxs = &rxq->rxq_soft[i];
   6103 		if (rxs->rxs_mbuf == NULL) {
   6104 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6105 				log(LOG_ERR, "%s: unable to allocate or map "
   6106 				    "rx buffer %d, error = %d\n",
   6107 				    device_xname(sc->sc_dev), i, error);
   6108 				/*
   6109 				 * XXX Should attempt to run with fewer receive
   6110 				 * XXX buffers instead of just failing.
   6111 				 */
   6112 				wm_rxdrain(rxq);
   6113 				return ENOMEM;
   6114 			}
   6115 		} else {
   6116 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6117 				wm_init_rxdesc(rxq, i);
   6118 			/*
   6119 			 * For 82575 and newer devices, the RX descriptors
   6120 			 * must be initialized after RCTL.EN is set in
   6121 			 * wm_set_filter().
   6122 			 */
   6123 		}
   6124 	}
   6125 	rxq->rxq_ptr = 0;
   6126 	rxq->rxq_discard = 0;
   6127 	WM_RXCHAIN_RESET(rxq);
   6128 
   6129 	return 0;
   6130 }
   6131 
   6132 static int
   6133 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6134     struct wm_rxqueue *rxq)
   6135 {
   6136 
   6137 	KASSERT(mutex_owned(rxq->rxq_lock));
   6138 
   6139 	/*
   6140 	 * Set up some register offsets that are different between
   6141 	 * the i82542 and the i82543 and later chips.
   6142 	 */
   6143 	if (sc->sc_type < WM_T_82543)
   6144 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6145 	else
   6146 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6147 
   6148 	wm_init_rx_regs(sc, wmq, rxq);
   6149 	return wm_init_rx_buffer(sc, rxq);
   6150 }
   6151 
   6152 /*
   6153  * wm_init_txrx_queues:
   6154  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6155  */
   6156 static int
   6157 wm_init_txrx_queues(struct wm_softc *sc)
   6158 {
   6159 	int i, error = 0;
   6160 
   6161 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6162 		device_xname(sc->sc_dev), __func__));
   6163 
   6164 	for (i = 0; i < sc->sc_nqueues; i++) {
   6165 		struct wm_queue *wmq = &sc->sc_queue[i];
   6166 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6167 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6168 
   6169 		mutex_enter(txq->txq_lock);
   6170 		wm_init_tx_queue(sc, wmq, txq);
   6171 		mutex_exit(txq->txq_lock);
   6172 
   6173 		mutex_enter(rxq->rxq_lock);
   6174 		error = wm_init_rx_queue(sc, wmq, rxq);
   6175 		mutex_exit(rxq->rxq_lock);
   6176 		if (error)
   6177 			break;
   6178 	}
   6179 
   6180 	return error;
   6181 }
   6182 
   6183 /*
   6184  * wm_tx_offload:
   6185  *
   6186  *	Set up TCP/IP checksumming parameters for the
   6187  *	specified packet.
   6188  */
   6189 static int
   6190 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6191     uint8_t *fieldsp)
   6192 {
   6193 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6194 	struct mbuf *m0 = txs->txs_mbuf;
   6195 	struct livengood_tcpip_ctxdesc *t;
   6196 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6197 	uint32_t ipcse;
   6198 	struct ether_header *eh;
   6199 	int offset, iphl;
   6200 	uint8_t fields;
   6201 
   6202 	/*
   6203 	 * XXX It would be nice if the mbuf pkthdr had offset
   6204 	 * fields for the protocol headers.
   6205 	 */
   6206 
   6207 	eh = mtod(m0, struct ether_header *);
   6208 	switch (htons(eh->ether_type)) {
   6209 	case ETHERTYPE_IP:
   6210 	case ETHERTYPE_IPV6:
   6211 		offset = ETHER_HDR_LEN;
   6212 		break;
   6213 
   6214 	case ETHERTYPE_VLAN:
   6215 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6216 		break;
   6217 
   6218 	default:
   6219 		/*
   6220 		 * Don't support this protocol or encapsulation.
   6221 		 */
   6222 		*fieldsp = 0;
   6223 		*cmdp = 0;
   6224 		return 0;
   6225 	}
   6226 
   6227 	if ((m0->m_pkthdr.csum_flags &
   6228 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6229 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6230 	} else {
   6231 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6232 	}
   6233 	ipcse = offset + iphl - 1;
   6234 
   6235 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6236 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6237 	seg = 0;
   6238 	fields = 0;
   6239 
   6240 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6241 		int hlen = offset + iphl;
   6242 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6243 
   6244 		if (__predict_false(m0->m_len <
   6245 				    (hlen + sizeof(struct tcphdr)))) {
   6246 			/*
   6247 			 * TCP/IP headers are not in the first mbuf; we need
   6248 			 * to do this the slow and painful way.  Let's just
   6249 			 * hope this doesn't happen very often.
   6250 			 */
   6251 			struct tcphdr th;
   6252 
   6253 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6254 
   6255 			m_copydata(m0, hlen, sizeof(th), &th);
   6256 			if (v4) {
   6257 				struct ip ip;
   6258 
   6259 				m_copydata(m0, offset, sizeof(ip), &ip);
   6260 				ip.ip_len = 0;
   6261 				m_copyback(m0,
   6262 				    offset + offsetof(struct ip, ip_len),
   6263 				    sizeof(ip.ip_len), &ip.ip_len);
   6264 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6265 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6266 			} else {
   6267 				struct ip6_hdr ip6;
   6268 
   6269 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6270 				ip6.ip6_plen = 0;
   6271 				m_copyback(m0,
   6272 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6273 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6274 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6275 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6276 			}
   6277 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6278 			    sizeof(th.th_sum), &th.th_sum);
   6279 
   6280 			hlen += th.th_off << 2;
   6281 		} else {
   6282 			/*
   6283 			 * TCP/IP headers are in the first mbuf; we can do
   6284 			 * this the easy way.
   6285 			 */
   6286 			struct tcphdr *th;
   6287 
   6288 			if (v4) {
   6289 				struct ip *ip =
   6290 				    (void *)(mtod(m0, char *) + offset);
   6291 				th = (void *)(mtod(m0, char *) + hlen);
   6292 
   6293 				ip->ip_len = 0;
   6294 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6295 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6296 			} else {
   6297 				struct ip6_hdr *ip6 =
   6298 				    (void *)(mtod(m0, char *) + offset);
   6299 				th = (void *)(mtod(m0, char *) + hlen);
   6300 
   6301 				ip6->ip6_plen = 0;
   6302 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6303 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6304 			}
   6305 			hlen += th->th_off << 2;
   6306 		}
   6307 
   6308 		if (v4) {
   6309 			WM_Q_EVCNT_INCR(txq, txtso);
   6310 			cmdlen |= WTX_TCPIP_CMD_IP;
   6311 		} else {
   6312 			WM_Q_EVCNT_INCR(txq, txtso6);
   6313 			ipcse = 0;
   6314 		}
   6315 		cmd |= WTX_TCPIP_CMD_TSE;
   6316 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6317 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6318 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6319 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6320 	}
   6321 
   6322 	/*
   6323 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6324 	 * offload feature, if we load the context descriptor, we
   6325 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6326 	 */
   6327 
   6328 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6329 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6330 	    WTX_TCPIP_IPCSE(ipcse);
   6331 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6332 		WM_Q_EVCNT_INCR(txq, txipsum);
   6333 		fields |= WTX_IXSM;
   6334 	}
   6335 
   6336 	offset += iphl;
   6337 
   6338 	if (m0->m_pkthdr.csum_flags &
   6339 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6340 		WM_Q_EVCNT_INCR(txq, txtusum);
   6341 		fields |= WTX_TXSM;
   6342 		tucs = WTX_TCPIP_TUCSS(offset) |
   6343 		    WTX_TCPIP_TUCSO(offset +
   6344 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6345 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6346 	} else if ((m0->m_pkthdr.csum_flags &
   6347 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6348 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6349 		fields |= WTX_TXSM;
   6350 		tucs = WTX_TCPIP_TUCSS(offset) |
   6351 		    WTX_TCPIP_TUCSO(offset +
   6352 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6353 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6354 	} else {
   6355 		/* Just initialize it to a valid TCP context. */
   6356 		tucs = WTX_TCPIP_TUCSS(offset) |
   6357 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6358 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6359 	}
   6360 
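	/*
	 * The context descriptor takes one slot in the same ring as the
	 * data descriptors; txs_ndesc is incremented below to account
	 * for it.
	 */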
   6361 	/* Fill in the context descriptor. */
   6362 	t = (struct livengood_tcpip_ctxdesc *)
   6363 	    &txq->txq_descs[txq->txq_next];
   6364 	t->tcpip_ipcs = htole32(ipcs);
   6365 	t->tcpip_tucs = htole32(tucs);
   6366 	t->tcpip_cmdlen = htole32(cmdlen);
   6367 	t->tcpip_seg = htole32(seg);
   6368 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6369 
   6370 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6371 	txs->txs_ndesc++;
   6372 
   6373 	*cmdp = cmd;
   6374 	*fieldsp = fields;
   6375 
   6376 	return 0;
   6377 }
   6378 
   6379 static inline int
   6380 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6381 {
   6382 	struct wm_softc *sc = ifp->if_softc;
   6383 	u_int cpuid = cpu_index(curcpu());
   6384 
   6385 	/*
	 * Currently a simple distribution strategy based on the CPU index.
	 * TODO:
	 * distribute by flowid (RSS hash value).
   6389 	 */
   6390 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6391 }
   6392 
   6393 /*
   6394  * wm_start:		[ifnet interface function]
   6395  *
   6396  *	Start packet transmission on the interface.
   6397  */
   6398 static void
   6399 wm_start(struct ifnet *ifp)
   6400 {
   6401 	struct wm_softc *sc = ifp->if_softc;
   6402 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6403 
   6404 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6405 
   6406 	/*
   6407 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6408 	 */
   6409 
   6410 	mutex_enter(txq->txq_lock);
   6411 	if (!txq->txq_stopping)
   6412 		wm_start_locked(ifp);
   6413 	mutex_exit(txq->txq_lock);
   6414 }
   6415 
   6416 static void
   6417 wm_start_locked(struct ifnet *ifp)
   6418 {
   6419 	struct wm_softc *sc = ifp->if_softc;
   6420 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6421 
   6422 	wm_send_common_locked(ifp, txq, false);
   6423 }
   6424 
   6425 static int
   6426 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6427 {
   6428 	int qid;
   6429 	struct wm_softc *sc = ifp->if_softc;
   6430 	struct wm_txqueue *txq;
   6431 
   6432 	qid = wm_select_txqueue(ifp, m);
   6433 	txq = &sc->sc_queue[qid].wmq_txq;
   6434 
   6435 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6436 		m_freem(m);
   6437 		WM_Q_EVCNT_INCR(txq, txdrop);
   6438 		return ENOBUFS;
   6439 	}
   6440 
   6441 	/*
   6442 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6443 	 */
   6444 	ifp->if_obytes += m->m_pkthdr.len;
   6445 	if (m->m_flags & M_MCAST)
   6446 		ifp->if_omcasts++;
   6447 
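	/*
	 * If mutex_tryenter() fails here, the lock is held by the Tx
	 * interrupt handler or the deferred start softint, and
	 * wm_deferred_start() will dequeue the packet we just enqueued,
	 * so it does not get stuck (see the fuller comment in
	 * wm_nq_transmit()).
	 */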
   6448 	if (mutex_tryenter(txq->txq_lock)) {
   6449 		if (!txq->txq_stopping)
   6450 			wm_transmit_locked(ifp, txq);
   6451 		mutex_exit(txq->txq_lock);
   6452 	}
   6453 
   6454 	return 0;
   6455 }
   6456 
   6457 static void
   6458 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6459 {
   6460 
   6461 	wm_send_common_locked(ifp, txq, true);
   6462 }
   6463 
   6464 static void
   6465 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6466     bool is_transmit)
   6467 {
   6468 	struct wm_softc *sc = ifp->if_softc;
   6469 	struct mbuf *m0;
   6470 	struct m_tag *mtag;
   6471 	struct wm_txsoft *txs;
   6472 	bus_dmamap_t dmamap;
   6473 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6474 	bus_addr_t curaddr;
   6475 	bus_size_t seglen, curlen;
   6476 	uint32_t cksumcmd;
   6477 	uint8_t cksumfields;
   6478 
   6479 	KASSERT(mutex_owned(txq->txq_lock));
   6480 
   6481 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6482 		return;
   6483 
   6484 	/* Remember the previous number of free descriptors. */
   6485 	ofree = txq->txq_free;
   6486 
   6487 	/*
   6488 	 * Loop through the send queue, setting up transmit descriptors
   6489 	 * until we drain the queue, or use up all available transmit
   6490 	 * descriptors.
   6491 	 */
   6492 	for (;;) {
   6493 		m0 = NULL;
   6494 
   6495 		/* Get a work queue entry. */
   6496 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6497 			wm_txeof(sc, txq);
   6498 			if (txq->txq_sfree == 0) {
   6499 				DPRINTF(WM_DEBUG_TX,
   6500 				    ("%s: TX: no free job descriptors\n",
   6501 					device_xname(sc->sc_dev)));
   6502 				WM_Q_EVCNT_INCR(txq, txsstall);
   6503 				break;
   6504 			}
   6505 		}
   6506 
   6507 		/* Grab a packet off the queue. */
   6508 		if (is_transmit)
   6509 			m0 = pcq_get(txq->txq_interq);
   6510 		else
   6511 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6512 		if (m0 == NULL)
   6513 			break;
   6514 
   6515 		DPRINTF(WM_DEBUG_TX,
   6516 		    ("%s: TX: have packet to transmit: %p\n",
   6517 		    device_xname(sc->sc_dev), m0));
   6518 
   6519 		txs = &txq->txq_soft[txq->txq_snext];
   6520 		dmamap = txs->txs_dmamap;
   6521 
   6522 		use_tso = (m0->m_pkthdr.csum_flags &
   6523 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6524 
   6525 		/*
   6526 		 * So says the Linux driver:
   6527 		 * The controller does a simple calculation to make sure
   6528 		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calculation assumes that
		 *	ceil(buffer len / MSS) <= 4.
		 * To make sure we don't overrun the FIFO, clamp the
		 * maximum DMA segment length to 4 * MSS when the MSS
		 * drops below WTX_MAX_LEN / 4.
   6533 		 */
   6534 		dmamap->dm_maxsegsz =
   6535 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6536 		    ? m0->m_pkthdr.segsz << 2
   6537 		    : WTX_MAX_LEN;
   6538 
   6539 		/*
   6540 		 * Load the DMA map.  If this fails, the packet either
   6541 		 * didn't fit in the allotted number of segments, or we
   6542 		 * were short on resources.  For the too-many-segments
   6543 		 * case, we simply report an error and drop the packet,
   6544 		 * since we can't sanely copy a jumbo packet to a single
   6545 		 * buffer.
   6546 		 */
   6547 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6548 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6549 		if (error) {
   6550 			if (error == EFBIG) {
   6551 				WM_Q_EVCNT_INCR(txq, txdrop);
   6552 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6553 				    "DMA segments, dropping...\n",
   6554 				    device_xname(sc->sc_dev));
   6555 				wm_dump_mbuf_chain(sc, m0);
   6556 				m_freem(m0);
   6557 				continue;
   6558 			}
   6559 			/*  Short on resources, just stop for now. */
   6560 			DPRINTF(WM_DEBUG_TX,
   6561 			    ("%s: TX: dmamap load failed: %d\n",
   6562 			    device_xname(sc->sc_dev), error));
   6563 			break;
   6564 		}
   6565 
   6566 		segs_needed = dmamap->dm_nsegs;
   6567 		if (use_tso) {
   6568 			/* For sentinel descriptor; see below. */
   6569 			segs_needed++;
   6570 		}
   6571 
   6572 		/*
   6573 		 * Ensure we have enough descriptors free to describe
   6574 		 * the packet.  Note, we always reserve one descriptor
   6575 		 * at the end of the ring due to the semantics of the
   6576 		 * TDT register, plus one more in the event we need
   6577 		 * to load offload context.
   6578 		 */
   6579 		if (segs_needed > txq->txq_free - 2) {
   6580 			/*
   6581 			 * Not enough free descriptors to transmit this
   6582 			 * packet.  We haven't committed anything yet,
   6583 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   6585 			 * layer that there are no more slots left.
   6586 			 */
   6587 			DPRINTF(WM_DEBUG_TX,
   6588 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6589 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6590 			    segs_needed, txq->txq_free - 1));
   6591 			ifp->if_flags |= IFF_OACTIVE;
   6592 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6593 			WM_Q_EVCNT_INCR(txq, txdstall);
   6594 			break;
   6595 		}
   6596 
   6597 		/*
   6598 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6599 		 * once we know we can transmit the packet, since we
   6600 		 * do some internal FIFO space accounting here.
   6601 		 */
   6602 		if (sc->sc_type == WM_T_82547 &&
   6603 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6604 			DPRINTF(WM_DEBUG_TX,
   6605 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6606 			    device_xname(sc->sc_dev)));
   6607 			ifp->if_flags |= IFF_OACTIVE;
   6608 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6609 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6610 			break;
   6611 		}
   6612 
   6613 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6614 
   6615 		DPRINTF(WM_DEBUG_TX,
   6616 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6617 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6618 
   6619 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6620 
   6621 		/*
   6622 		 * Store a pointer to the packet so that we can free it
   6623 		 * later.
   6624 		 *
   6625 		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
   6627 		 * incremented by 1 if we do checksum offload (a descriptor
   6628 		 * is used to set the checksum context).
   6629 		 */
   6630 		txs->txs_mbuf = m0;
   6631 		txs->txs_firstdesc = txq->txq_next;
   6632 		txs->txs_ndesc = segs_needed;
   6633 
   6634 		/* Set up offload parameters for this packet. */
   6635 		if (m0->m_pkthdr.csum_flags &
   6636 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6637 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6638 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6639 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6640 					  &cksumfields) != 0) {
   6641 				/* Error message already displayed. */
   6642 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6643 				continue;
   6644 			}
   6645 		} else {
   6646 			cksumcmd = 0;
   6647 			cksumfields = 0;
   6648 		}
   6649 
   6650 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6651 
   6652 		/* Sync the DMA map. */
   6653 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6654 		    BUS_DMASYNC_PREWRITE);
   6655 
   6656 		/* Initialize the transmit descriptor. */
   6657 		for (nexttx = txq->txq_next, seg = 0;
   6658 		     seg < dmamap->dm_nsegs; seg++) {
   6659 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6660 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6661 			     seglen != 0;
   6662 			     curaddr += curlen, seglen -= curlen,
   6663 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6664 				curlen = seglen;
   6665 
   6666 				/*
   6667 				 * So says the Linux driver:
   6668 				 * Work around for premature descriptor
   6669 				 * write-backs in TSO mode.  Append a
   6670 				 * 4-byte sentinel descriptor.
   6671 				 */
   6672 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6673 				    curlen > 8)
   6674 					curlen -= 4;
   6675 
   6676 				wm_set_dma_addr(
   6677 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6678 				txq->txq_descs[nexttx].wtx_cmdlen
   6679 				    = htole32(cksumcmd | curlen);
   6680 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6681 				    = 0;
   6682 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6683 				    = cksumfields;
   6684 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6685 				lasttx = nexttx;
   6686 
   6687 				DPRINTF(WM_DEBUG_TX,
   6688 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6689 				     "len %#04zx\n",
   6690 				    device_xname(sc->sc_dev), nexttx,
   6691 				    (uint64_t)curaddr, curlen));
   6692 			}
   6693 		}
   6694 
   6695 		KASSERT(lasttx != -1);
   6696 
   6697 		/*
   6698 		 * Set up the command byte on the last descriptor of
   6699 		 * the packet.  If we're in the interrupt delay window,
   6700 		 * delay the interrupt.
   6701 		 */
   6702 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6703 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6704 
   6705 		/*
   6706 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6707 		 * up the descriptor to encapsulate the packet for us.
   6708 		 *
   6709 		 * This is only valid on the last descriptor of the packet.
   6710 		 */
   6711 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6712 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6713 			    htole32(WTX_CMD_VLE);
   6714 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6715 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6716 		}
   6717 
   6718 		txs->txs_lastdesc = lasttx;
   6719 
   6720 		DPRINTF(WM_DEBUG_TX,
   6721 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6722 		    device_xname(sc->sc_dev),
   6723 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6724 
   6725 		/* Sync the descriptors we're using. */
   6726 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6727 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6728 
   6729 		/* Give the packet to the chip. */
   6730 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6731 
   6732 		DPRINTF(WM_DEBUG_TX,
   6733 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6734 
   6735 		DPRINTF(WM_DEBUG_TX,
   6736 		    ("%s: TX: finished transmitting packet, job %d\n",
   6737 		    device_xname(sc->sc_dev), txq->txq_snext));
   6738 
   6739 		/* Advance the tx pointer. */
   6740 		txq->txq_free -= txs->txs_ndesc;
   6741 		txq->txq_next = nexttx;
   6742 
   6743 		txq->txq_sfree--;
   6744 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6745 
   6746 		/* Pass the packet to any BPF listeners. */
   6747 		bpf_mtap(ifp, m0);
   6748 	}
   6749 
   6750 	if (m0 != NULL) {
   6751 		ifp->if_flags |= IFF_OACTIVE;
   6752 		WM_Q_EVCNT_INCR(txq, txdrop);
   6753 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6754 			__func__));
   6755 		m_freem(m0);
   6756 	}
   6757 
   6758 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6759 		/* No more slots; notify upper layer. */
   6760 		ifp->if_flags |= IFF_OACTIVE;
   6761 	}
   6762 
   6763 	if (txq->txq_free != ofree) {
   6764 		/* Set a watchdog timer in case the chip flakes out. */
   6765 		ifp->if_timer = 5;
   6766 	}
   6767 }
   6768 
   6769 /*
   6770  * wm_nq_tx_offload:
   6771  *
   6772  *	Set up TCP/IP checksumming parameters for the
   6773  *	specified packet, for NEWQUEUE devices
   6774  */
   6775 static int
   6776 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6777     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6778 {
   6779 	struct mbuf *m0 = txs->txs_mbuf;
   6780 	struct m_tag *mtag;
   6781 	uint32_t vl_len, mssidx, cmdc;
   6782 	struct ether_header *eh;
   6783 	int offset, iphl;
   6784 
   6785 	/*
   6786 	 * XXX It would be nice if the mbuf pkthdr had offset
   6787 	 * fields for the protocol headers.
   6788 	 */
   6789 	*cmdlenp = 0;
   6790 	*fieldsp = 0;
   6791 
   6792 	eh = mtod(m0, struct ether_header *);
   6793 	switch (htons(eh->ether_type)) {
   6794 	case ETHERTYPE_IP:
   6795 	case ETHERTYPE_IPV6:
   6796 		offset = ETHER_HDR_LEN;
   6797 		break;
   6798 
   6799 	case ETHERTYPE_VLAN:
   6800 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6801 		break;
   6802 
   6803 	default:
   6804 		/* Don't support this protocol or encapsulation. */
   6805 		*do_csum = false;
   6806 		return 0;
   6807 	}
   6808 	*do_csum = true;
   6809 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6810 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6811 
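	/*
	 * The advanced context descriptor packs the MAC header length,
	 * IP header length and VLAN tag into a single VLLEN word; the
	 * KASSERTs below check that each value fits its field.
	 */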
   6812 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6813 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6814 
   6815 	if ((m0->m_pkthdr.csum_flags &
   6816 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6817 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6818 	} else {
   6819 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6820 	}
   6821 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6822 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6823 
   6824 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6825 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6826 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6827 		*cmdlenp |= NQTX_CMD_VLE;
   6828 	}
   6829 
   6830 	mssidx = 0;
   6831 
   6832 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6833 		int hlen = offset + iphl;
   6834 		int tcp_hlen;
   6835 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6836 
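		/*
		 * Same TSO header fixup as in wm_tx_offload(): zero the
		 * IP length field and seed th_sum with the pseudo-header
		 * checksum so the chip can fill in per-segment values.
		 */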
   6837 		if (__predict_false(m0->m_len <
   6838 				    (hlen + sizeof(struct tcphdr)))) {
   6839 			/*
   6840 			 * TCP/IP headers are not in the first mbuf; we need
   6841 			 * to do this the slow and painful way.  Let's just
   6842 			 * hope this doesn't happen very often.
   6843 			 */
   6844 			struct tcphdr th;
   6845 
   6846 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6847 
   6848 			m_copydata(m0, hlen, sizeof(th), &th);
   6849 			if (v4) {
   6850 				struct ip ip;
   6851 
   6852 				m_copydata(m0, offset, sizeof(ip), &ip);
   6853 				ip.ip_len = 0;
   6854 				m_copyback(m0,
   6855 				    offset + offsetof(struct ip, ip_len),
   6856 				    sizeof(ip.ip_len), &ip.ip_len);
   6857 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6858 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6859 			} else {
   6860 				struct ip6_hdr ip6;
   6861 
   6862 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6863 				ip6.ip6_plen = 0;
   6864 				m_copyback(m0,
   6865 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6866 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6867 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6868 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6869 			}
   6870 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6871 			    sizeof(th.th_sum), &th.th_sum);
   6872 
   6873 			tcp_hlen = th.th_off << 2;
   6874 		} else {
   6875 			/*
   6876 			 * TCP/IP headers are in the first mbuf; we can do
   6877 			 * this the easy way.
   6878 			 */
   6879 			struct tcphdr *th;
   6880 
   6881 			if (v4) {
   6882 				struct ip *ip =
   6883 				    (void *)(mtod(m0, char *) + offset);
   6884 				th = (void *)(mtod(m0, char *) + hlen);
   6885 
   6886 				ip->ip_len = 0;
   6887 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6888 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6889 			} else {
   6890 				struct ip6_hdr *ip6 =
   6891 				    (void *)(mtod(m0, char *) + offset);
   6892 				th = (void *)(mtod(m0, char *) + hlen);
   6893 
   6894 				ip6->ip6_plen = 0;
   6895 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6896 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6897 			}
   6898 			tcp_hlen = th->th_off << 2;
   6899 		}
   6900 		hlen += tcp_hlen;
   6901 		*cmdlenp |= NQTX_CMD_TSE;
   6902 
   6903 		if (v4) {
   6904 			WM_Q_EVCNT_INCR(txq, txtso);
   6905 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6906 		} else {
   6907 			WM_Q_EVCNT_INCR(txq, txtso6);
   6908 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6909 		}
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
			& ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6912 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6913 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6914 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6915 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6916 	} else {
   6917 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6918 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6919 	}
   6920 
   6921 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6922 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6923 		cmdc |= NQTXC_CMD_IP4;
   6924 	}
   6925 
   6926 	if (m0->m_pkthdr.csum_flags &
   6927 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6928 		WM_Q_EVCNT_INCR(txq, txtusum);
   6929 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6930 			cmdc |= NQTXC_CMD_TCP;
   6931 		} else {
   6932 			cmdc |= NQTXC_CMD_UDP;
   6933 		}
   6934 		cmdc |= NQTXC_CMD_IP4;
   6935 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6936 	}
   6937 	if (m0->m_pkthdr.csum_flags &
   6938 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6939 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6940 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6941 			cmdc |= NQTXC_CMD_TCP;
   6942 		} else {
   6943 			cmdc |= NQTXC_CMD_UDP;
   6944 		}
   6945 		cmdc |= NQTXC_CMD_IP6;
   6946 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6947 	}
   6948 
   6949 	/* Fill in the context descriptor. */
   6950 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6951 	    htole32(vl_len);
   6952 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6953 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6954 	    htole32(cmdc);
   6955 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6956 	    htole32(mssidx);
   6957 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6958 	DPRINTF(WM_DEBUG_TX,
   6959 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6960 	    txq->txq_next, 0, vl_len));
   6961 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6962 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6963 	txs->txs_ndesc++;
   6964 	return 0;
   6965 }
   6966 
   6967 /*
   6968  * wm_nq_start:		[ifnet interface function]
   6969  *
   6970  *	Start packet transmission on the interface for NEWQUEUE devices
   6971  */
   6972 static void
   6973 wm_nq_start(struct ifnet *ifp)
   6974 {
   6975 	struct wm_softc *sc = ifp->if_softc;
   6976 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6977 
   6978 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6979 
   6980 	/*
   6981 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6982 	 */
   6983 
   6984 	mutex_enter(txq->txq_lock);
   6985 	if (!txq->txq_stopping)
   6986 		wm_nq_start_locked(ifp);
   6987 	mutex_exit(txq->txq_lock);
   6988 }
   6989 
   6990 static void
   6991 wm_nq_start_locked(struct ifnet *ifp)
   6992 {
   6993 	struct wm_softc *sc = ifp->if_softc;
   6994 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6995 
   6996 	wm_nq_send_common_locked(ifp, txq, false);
   6997 }
   6998 
   6999 static int
   7000 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7001 {
   7002 	int qid;
   7003 	struct wm_softc *sc = ifp->if_softc;
   7004 	struct wm_txqueue *txq;
   7005 
   7006 	qid = wm_select_txqueue(ifp, m);
   7007 	txq = &sc->sc_queue[qid].wmq_txq;
   7008 
   7009 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7010 		m_freem(m);
   7011 		WM_Q_EVCNT_INCR(txq, txdrop);
   7012 		return ENOBUFS;
   7013 	}
   7014 
   7015 	/*
   7016 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7017 	 */
   7018 	ifp->if_obytes += m->m_pkthdr.len;
   7019 	if (m->m_flags & M_MCAST)
   7020 		ifp->if_omcasts++;
   7021 
   7022 	/*
	 * There are two situations in which this mutex_tryenter() can
	 * fail at run time:
	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_deferred_start())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start(), so it does not get stuck.
	 * In case (2), the last packet enqueued to txq->txq_interq is
	 * likewise dequeued by wm_deferred_start(), so it does not get
	 * stuck either.
   7031 	 */
   7032 	if (mutex_tryenter(txq->txq_lock)) {
   7033 		if (!txq->txq_stopping)
   7034 			wm_nq_transmit_locked(ifp, txq);
   7035 		mutex_exit(txq->txq_lock);
   7036 	}
   7037 
   7038 	return 0;
   7039 }
   7040 
   7041 static void
   7042 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7043 {
   7044 
   7045 	wm_nq_send_common_locked(ifp, txq, true);
   7046 }
   7047 
   7048 static void
   7049 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7050     bool is_transmit)
   7051 {
   7052 	struct wm_softc *sc = ifp->if_softc;
   7053 	struct mbuf *m0;
   7054 	struct m_tag *mtag;
   7055 	struct wm_txsoft *txs;
   7056 	bus_dmamap_t dmamap;
   7057 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7058 	bool do_csum, sent;
   7059 
   7060 	KASSERT(mutex_owned(txq->txq_lock));
   7061 
   7062 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   7063 		return;
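	/*
	 * WM_TXQ_NO_SPACE is the per-queue analogue of IFF_OACTIVE;
	 * wm_txeof() clears it once descriptors have been reclaimed.
	 */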
   7064 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7065 		return;
   7066 
   7067 	sent = false;
   7068 
   7069 	/*
   7070 	 * Loop through the send queue, setting up transmit descriptors
   7071 	 * until we drain the queue, or use up all available transmit
   7072 	 * descriptors.
   7073 	 */
   7074 	for (;;) {
   7075 		m0 = NULL;
   7076 
   7077 		/* Get a work queue entry. */
   7078 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7079 			wm_txeof(sc, txq);
   7080 			if (txq->txq_sfree == 0) {
   7081 				DPRINTF(WM_DEBUG_TX,
   7082 				    ("%s: TX: no free job descriptors\n",
   7083 					device_xname(sc->sc_dev)));
   7084 				WM_Q_EVCNT_INCR(txq, txsstall);
   7085 				break;
   7086 			}
   7087 		}
   7088 
   7089 		/* Grab a packet off the queue. */
   7090 		if (is_transmit)
   7091 			m0 = pcq_get(txq->txq_interq);
   7092 		else
   7093 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7094 		if (m0 == NULL)
   7095 			break;
   7096 
   7097 		DPRINTF(WM_DEBUG_TX,
   7098 		    ("%s: TX: have packet to transmit: %p\n",
   7099 		    device_xname(sc->sc_dev), m0));
   7100 
   7101 		txs = &txq->txq_soft[txq->txq_snext];
   7102 		dmamap = txs->txs_dmamap;
   7103 
   7104 		/*
   7105 		 * Load the DMA map.  If this fails, the packet either
   7106 		 * didn't fit in the allotted number of segments, or we
   7107 		 * were short on resources.  For the too-many-segments
   7108 		 * case, we simply report an error and drop the packet,
   7109 		 * since we can't sanely copy a jumbo packet to a single
   7110 		 * buffer.
   7111 		 */
   7112 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7113 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7114 		if (error) {
   7115 			if (error == EFBIG) {
   7116 				WM_Q_EVCNT_INCR(txq, txdrop);
   7117 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7118 				    "DMA segments, dropping...\n",
   7119 				    device_xname(sc->sc_dev));
   7120 				wm_dump_mbuf_chain(sc, m0);
   7121 				m_freem(m0);
   7122 				continue;
   7123 			}
   7124 			/* Short on resources, just stop for now. */
   7125 			DPRINTF(WM_DEBUG_TX,
   7126 			    ("%s: TX: dmamap load failed: %d\n",
   7127 			    device_xname(sc->sc_dev), error));
   7128 			break;
   7129 		}
   7130 
   7131 		segs_needed = dmamap->dm_nsegs;
   7132 
   7133 		/*
   7134 		 * Ensure we have enough descriptors free to describe
   7135 		 * the packet.  Note, we always reserve one descriptor
   7136 		 * at the end of the ring due to the semantics of the
   7137 		 * TDT register, plus one more in the event we need
   7138 		 * to load offload context.
   7139 		 */
   7140 		if (segs_needed > txq->txq_free - 2) {
   7141 			/*
   7142 			 * Not enough free descriptors to transmit this
   7143 			 * packet.  We haven't committed anything yet,
   7144 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   7146 			 * layer that there are no more slots left.
   7147 			 */
   7148 			DPRINTF(WM_DEBUG_TX,
   7149 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7150 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7151 			    segs_needed, txq->txq_free - 1));
   7152 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7153 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7154 			WM_Q_EVCNT_INCR(txq, txdstall);
   7155 			break;
   7156 		}
   7157 
   7158 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7159 
   7160 		DPRINTF(WM_DEBUG_TX,
   7161 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7162 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7163 
   7164 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7165 
   7166 		/*
   7167 		 * Store a pointer to the packet so that we can free it
   7168 		 * later.
   7169 		 *
   7170 		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
   7172 		 * incremented by 1 if we do checksum offload (a descriptor
   7173 		 * is used to set the checksum context).
   7174 		 */
   7175 		txs->txs_mbuf = m0;
   7176 		txs->txs_firstdesc = txq->txq_next;
   7177 		txs->txs_ndesc = segs_needed;
   7178 
   7179 		/* Set up offload parameters for this packet. */
   7180 		uint32_t cmdlen, fields, dcmdlen;
   7181 		if (m0->m_pkthdr.csum_flags &
   7182 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7183 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7184 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7185 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7186 			    &do_csum) != 0) {
   7187 				/* Error message already displayed. */
   7188 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7189 				continue;
   7190 			}
   7191 		} else {
   7192 			do_csum = false;
   7193 			cmdlen = 0;
   7194 			fields = 0;
   7195 		}
   7196 
   7197 		/* Sync the DMA map. */
   7198 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7199 		    BUS_DMASYNC_PREWRITE);
   7200 
   7201 		/* Initialize the first transmit descriptor. */
   7202 		nexttx = txq->txq_next;
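		/*
		 * Without an offload context we can use a legacy
		 * descriptor, which carries the VLAN tag directly;
		 * otherwise we use the advanced (NEWQUEUE) data
		 * descriptor format so that the context descriptor set
		 * up above applies to this packet.
		 */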
   7203 		if (!do_csum) {
   7204 			/* setup a legacy descriptor */
   7205 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7206 			    dmamap->dm_segs[0].ds_addr);
   7207 			txq->txq_descs[nexttx].wtx_cmdlen =
   7208 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7209 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7210 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7211 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7212 			    NULL) {
   7213 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7214 				    htole32(WTX_CMD_VLE);
   7215 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7216 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7217 			} else {
   7218 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7219 			}
   7220 			dcmdlen = 0;
   7221 		} else {
   7222 			/* setup an advanced data descriptor */
   7223 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7224 			    htole64(dmamap->dm_segs[0].ds_addr);
   7225 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7226 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7228 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7229 			    htole32(fields);
   7230 			DPRINTF(WM_DEBUG_TX,
   7231 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7232 			    device_xname(sc->sc_dev), nexttx,
   7233 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7234 			DPRINTF(WM_DEBUG_TX,
   7235 			    ("\t 0x%08x%08x\n", fields,
   7236 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7237 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7238 		}
   7239 
   7240 		lasttx = nexttx;
   7241 		nexttx = WM_NEXTTX(txq, nexttx);
   7242 		/*
		 * Fill in the next descriptors.  The legacy and advanced
		 * formats are the same from here on.
   7245 		 */
   7246 		for (seg = 1; seg < dmamap->dm_nsegs;
   7247 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7248 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7249 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7250 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7251 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7252 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7253 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7254 			lasttx = nexttx;
   7255 
   7256 			DPRINTF(WM_DEBUG_TX,
   7257 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7258 			     "len %#04zx\n",
   7259 			    device_xname(sc->sc_dev), nexttx,
   7260 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7261 			    dmamap->dm_segs[seg].ds_len));
   7262 		}
   7263 
   7264 		KASSERT(lasttx != -1);
   7265 
   7266 		/*
   7267 		 * Set up the command byte on the last descriptor of
   7268 		 * the packet.  If we're in the interrupt delay window,
   7269 		 * delay the interrupt.
   7270 		 */
   7271 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7272 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
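		/*
		 * The KASSERT above relies on the legacy and NEWQUEUE
		 * EOP/RS bits having the same values, which lets us set
		 * them through the legacy wtx_cmdlen view even for
		 * advanced descriptors.
		 */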
   7273 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7274 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7275 
   7276 		txs->txs_lastdesc = lasttx;
   7277 
   7278 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7279 		    device_xname(sc->sc_dev),
   7280 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7281 
   7282 		/* Sync the descriptors we're using. */
   7283 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7284 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7285 
   7286 		/* Give the packet to the chip. */
   7287 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7288 		sent = true;
   7289 
   7290 		DPRINTF(WM_DEBUG_TX,
   7291 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7292 
   7293 		DPRINTF(WM_DEBUG_TX,
   7294 		    ("%s: TX: finished transmitting packet, job %d\n",
   7295 		    device_xname(sc->sc_dev), txq->txq_snext));
   7296 
   7297 		/* Advance the tx pointer. */
   7298 		txq->txq_free -= txs->txs_ndesc;
   7299 		txq->txq_next = nexttx;
   7300 
   7301 		txq->txq_sfree--;
   7302 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7303 
   7304 		/* Pass the packet to any BPF listeners. */
   7305 		bpf_mtap(ifp, m0);
   7306 	}
   7307 
   7308 	if (m0 != NULL) {
   7309 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7310 		WM_Q_EVCNT_INCR(txq, txdrop);
   7311 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7312 			__func__));
   7313 		m_freem(m0);
   7314 	}
   7315 
   7316 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7317 		/* No more slots; notify upper layer. */
   7318 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7319 	}
   7320 
   7321 	if (sent) {
   7322 		/* Set a watchdog timer in case the chip flakes out. */
   7323 		ifp->if_timer = 5;
   7324 	}
   7325 }
   7326 
   7327 static void
   7328 wm_deferred_start(struct ifnet *ifp)
   7329 {
   7330 	struct wm_softc *sc = ifp->if_softc;
   7331 	int qid = 0;
   7332 
   7333 	/*
	 * Try to transmit on all Tx queues.  It might be better to pass
	 * the relevant txq in somehow and transmit only on that queue.
   7336 	 */
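	/*
	 * The core lock is dropped while a txq is being serviced, so
	 * after each queue we jump back here to re-acquire it and
	 * re-check sc_core_stopping before moving on.
	 */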
   7337 restart:
   7338 	WM_CORE_LOCK(sc);
   7339 	if (sc->sc_core_stopping)
   7340 		goto out;
   7341 
   7342 	for (; qid < sc->sc_nqueues; qid++) {
   7343 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   7344 
   7345 		if (!mutex_tryenter(txq->txq_lock))
   7346 			continue;
   7347 
   7348 		if (txq->txq_stopping) {
   7349 			mutex_exit(txq->txq_lock);
   7350 			continue;
   7351 		}
   7352 		WM_CORE_UNLOCK(sc);
   7353 
   7354 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
			/* XXX needed for ALTQ */
   7356 			if (qid == 0)
   7357 				wm_nq_start_locked(ifp);
   7358 			wm_nq_transmit_locked(ifp, txq);
   7359 		} else {
			/* XXX needed for ALTQ */
   7361 			if (qid == 0)
   7362 				wm_start_locked(ifp);
   7363 			wm_transmit_locked(ifp, txq);
   7364 		}
   7365 		mutex_exit(txq->txq_lock);
   7366 
   7367 		qid++;
   7368 		goto restart;
   7369 	}
   7370 out:
   7371 	WM_CORE_UNLOCK(sc);
   7372 }
   7373 
   7374 /* Interrupt */
   7375 
   7376 /*
   7377  * wm_txeof:
   7378  *
   7379  *	Helper; handle transmit interrupts.
   7380  */
   7381 static int
   7382 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7383 {
   7384 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7385 	struct wm_txsoft *txs;
   7386 	bool processed = false;
   7387 	int count = 0;
   7388 	int i;
   7389 	uint8_t status;
   7390 
   7391 	KASSERT(mutex_owned(txq->txq_lock));
   7392 
   7393 	if (txq->txq_stopping)
   7394 		return 0;
   7395 
   7396 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7397 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7398 	else
   7399 		ifp->if_flags &= ~IFF_OACTIVE;
   7400 
   7401 	/*
   7402 	 * Go through the Tx list and free mbufs for those
   7403 	 * frames which have been transmitted.
   7404 	 */
   7405 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7406 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7407 		txs = &txq->txq_soft[i];
   7408 
   7409 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7410 			device_xname(sc->sc_dev), i));
   7411 
   7412 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7413 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7414 
   7415 		status =
   7416 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
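		/*
		 * If the descriptor isn't done yet, hand it back to the
		 * device (PREREAD) and stop scanning; everything after
		 * it is still in flight.
		 */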
   7417 		if ((status & WTX_ST_DD) == 0) {
   7418 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7419 			    BUS_DMASYNC_PREREAD);
   7420 			break;
   7421 		}
   7422 
   7423 		processed = true;
   7424 		count++;
   7425 		DPRINTF(WM_DEBUG_TX,
   7426 		    ("%s: TX: job %d done: descs %d..%d\n",
   7427 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7428 		    txs->txs_lastdesc));
   7429 
   7430 		/*
   7431 		 * XXX We should probably be using the statistics
   7432 		 * XXX registers, but I don't know if they exist
   7433 		 * XXX on chips before the i82544.
   7434 		 */
   7435 
   7436 #ifdef WM_EVENT_COUNTERS
   7437 		if (status & WTX_ST_TU)
   7438 			WM_Q_EVCNT_INCR(txq, tu);
   7439 #endif /* WM_EVENT_COUNTERS */
   7440 
   7441 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7442 			ifp->if_oerrors++;
   7443 			if (status & WTX_ST_LC)
   7444 				log(LOG_WARNING, "%s: late collision\n",
   7445 				    device_xname(sc->sc_dev));
   7446 			else if (status & WTX_ST_EC) {
   7447 				ifp->if_collisions += 16;
   7448 				log(LOG_WARNING, "%s: excessive collisions\n",
   7449 				    device_xname(sc->sc_dev));
   7450 			}
   7451 		} else
   7452 			ifp->if_opackets++;
   7453 
   7454 		txq->txq_free += txs->txs_ndesc;
   7455 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7456 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7457 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7458 		m_freem(txs->txs_mbuf);
   7459 		txs->txs_mbuf = NULL;
   7460 	}
   7461 
   7462 	/* Update the dirty transmit buffer pointer. */
   7463 	txq->txq_sdirty = i;
   7464 	DPRINTF(WM_DEBUG_TX,
   7465 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7466 
   7467 	if (count != 0)
   7468 		rnd_add_uint32(&sc->rnd_source, count);
   7469 
   7470 	/*
   7471 	 * If there are no more pending transmissions, cancel the watchdog
   7472 	 * timer.
   7473 	 */
   7474 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7475 		ifp->if_timer = 0;
   7476 
   7477 	return processed;
   7478 }
   7479 
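/*
 * The wm_rxdesc_* accessors below hide the three Rx descriptor formats
 * in use: legacy (wrx_*), 82574 extended (erx_*) and NEWQUEUE advanced
 * (nqrx_*).
 */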
   7480 static inline uint32_t
   7481 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7482 {
   7483 	struct wm_softc *sc = rxq->rxq_sc;
   7484 
   7485 	if (sc->sc_type == WM_T_82574)
   7486 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7487 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7488 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7489 	else
   7490 		return rxq->rxq_descs[idx].wrx_status;
   7491 }
   7492 
   7493 static inline uint32_t
   7494 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7495 {
   7496 	struct wm_softc *sc = rxq->rxq_sc;
   7497 
   7498 	if (sc->sc_type == WM_T_82574)
   7499 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7500 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7501 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7502 	else
   7503 		return rxq->rxq_descs[idx].wrx_errors;
   7504 }
   7505 
   7506 static inline uint16_t
   7507 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7508 {
   7509 	struct wm_softc *sc = rxq->rxq_sc;
   7510 
   7511 	if (sc->sc_type == WM_T_82574)
   7512 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7513 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7514 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7515 	else
   7516 		return rxq->rxq_descs[idx].wrx_special;
   7517 }
   7518 
   7519 static inline int
   7520 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7521 {
   7522 	struct wm_softc *sc = rxq->rxq_sc;
   7523 
   7524 	if (sc->sc_type == WM_T_82574)
   7525 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7526 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7527 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7528 	else
   7529 		return rxq->rxq_descs[idx].wrx_len;
   7530 }
   7531 
   7532 #ifdef WM_DEBUG
   7533 static inline uint32_t
   7534 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7535 {
   7536 	struct wm_softc *sc = rxq->rxq_sc;
   7537 
   7538 	if (sc->sc_type == WM_T_82574)
   7539 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7540 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7541 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7542 	else
   7543 		return 0;
   7544 }
   7545 
   7546 static inline uint8_t
   7547 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7548 {
   7549 	struct wm_softc *sc = rxq->rxq_sc;
   7550 
   7551 	if (sc->sc_type == WM_T_82574)
   7552 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7553 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7554 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7555 	else
   7556 		return 0;
   7557 }
   7558 #endif /* WM_DEBUG */
   7559 
   7560 static inline bool
   7561 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7562     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7563 {
   7564 
   7565 	if (sc->sc_type == WM_T_82574)
   7566 		return (status & ext_bit) != 0;
   7567 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7568 		return (status & nq_bit) != 0;
   7569 	else
   7570 		return (status & legacy_bit) != 0;
   7571 }
   7572 
   7573 static inline bool
   7574 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7575     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7576 {
   7577 
   7578 	if (sc->sc_type == WM_T_82574)
   7579 		return (error & ext_bit) != 0;
   7580 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7581 		return (error & nq_bit) != 0;
   7582 	else
   7583 		return (error & legacy_bit) != 0;
   7584 }
   7585 
   7586 static inline bool
   7587 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7588 {
   7589 
   7590 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7591 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7592 		return true;
   7593 	else
   7594 		return false;
   7595 }
   7596 
   7597 static inline bool
   7598 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7599 {
   7600 	struct wm_softc *sc = rxq->rxq_sc;
   7601 
	/* XXXX missing error bit for newqueue? */
	if (wm_rxdesc_is_set_error(sc, errors,
		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ |
		EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
		NQRXC_ERROR_RXE)) {
		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
			EXTRXC_ERROR_SE, 0))
			log(LOG_WARNING, "%s: symbol error\n",
			    device_xname(sc->sc_dev));
		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
			EXTRXC_ERROR_SEQ, 0))
			log(LOG_WARNING, "%s: receive sequence error\n",
			    device_xname(sc->sc_dev));
		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
			EXTRXC_ERROR_CE, 0))
			log(LOG_WARNING, "%s: CRC error\n",
			    device_xname(sc->sc_dev));
   7616 		return true;
   7617 	}
   7618 
   7619 	return false;
   7620 }
   7621 
   7622 static inline bool
   7623 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7624 {
   7625 	struct wm_softc *sc = rxq->rxq_sc;
   7626 
   7627 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7628 		NQRXC_STATUS_DD)) {
   7629 		/* We have processed all of the receive descriptors. */
   7630 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7631 		return false;
   7632 	}
   7633 
   7634 	return true;
   7635 }
   7636 
   7637 static inline bool
wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
    uint16_t vlantag, struct mbuf *m)
   7640 {
   7641 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7642 
   7643 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7644 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7645 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7646 	}
   7647 
   7648 	return true;
   7649 }
   7650 
   7651 static inline void
   7652 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7653     uint32_t errors, struct mbuf *m)
   7654 {
   7655 	struct wm_softc *sc = rxq->rxq_sc;
   7656 
   7657 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7658 		if (wm_rxdesc_is_set_status(sc, status,
   7659 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7660 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7661 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7662 			if (wm_rxdesc_is_set_error(sc, errors,
   7663 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7664 				m->m_pkthdr.csum_flags |=
   7665 					M_CSUM_IPv4_BAD;
   7666 		}
   7667 		if (wm_rxdesc_is_set_status(sc, status,
   7668 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7669 			/*
   7670 			 * Note: we don't know if this was TCP or UDP,
   7671 			 * so we just set both bits, and expect the
   7672 			 * upper layers to deal.
   7673 			 */
   7674 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7675 			m->m_pkthdr.csum_flags |=
   7676 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7677 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7678 			if (wm_rxdesc_is_set_error(sc, errors,
   7679 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7680 				m->m_pkthdr.csum_flags |=
   7681 					M_CSUM_TCP_UDP_BAD;
   7682 		}
   7683 	}
   7684 }
   7685 
   7686 /*
   7687  * wm_rxeof:
   7688  *
   7689  *	Helper; handle receive interrupts.
   7690  */
   7691 static void
   7692 wm_rxeof(struct wm_rxqueue *rxq)
   7693 {
   7694 	struct wm_softc *sc = rxq->rxq_sc;
   7695 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7696 	struct wm_rxsoft *rxs;
   7697 	struct mbuf *m;
   7698 	int i, len;
   7699 	int count = 0;
   7700 	uint32_t status, errors;
   7701 	uint16_t vlantag;
   7702 
   7703 	KASSERT(mutex_owned(rxq->rxq_lock));
   7704 
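	/*
	 * rxq_discard is sticky: once a buffer of a multi-descriptor
	 * packet is dropped, the remaining descriptors of that packet
	 * are recycled (below) until EOP is seen.
	 */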
   7705 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7706 		rxs = &rxq->rxq_soft[i];
   7707 
   7708 		DPRINTF(WM_DEBUG_RX,
   7709 		    ("%s: RX: checking descriptor %d\n",
   7710 		    device_xname(sc->sc_dev), i));
		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7712 
   7713 		status = wm_rxdesc_get_status(rxq, i);
   7714 		errors = wm_rxdesc_get_errors(rxq, i);
   7715 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   7716 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   7717 #ifdef WM_DEBUG
   7718 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   7719 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   7720 #endif
   7721 
   7722 		if (!wm_rxdesc_dd(rxq, i, status))
   7723 			break;
   7724 
   7725 		count++;
   7726 		if (__predict_false(rxq->rxq_discard)) {
   7727 			DPRINTF(WM_DEBUG_RX,
   7728 			    ("%s: RX: discarding contents of descriptor %d\n",
   7729 			    device_xname(sc->sc_dev), i));
   7730 			wm_init_rxdesc(rxq, i);
   7731 			if (wm_rxdesc_is_eop(rxq, status)) {
   7732 				/* Reset our state. */
   7733 				DPRINTF(WM_DEBUG_RX,
   7734 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7735 				    device_xname(sc->sc_dev)));
   7736 				rxq->rxq_discard = 0;
   7737 			}
   7738 			continue;
   7739 		}
   7740 
   7741 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7742 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7743 
   7744 		m = rxs->rxs_mbuf;
   7745 
   7746 		/*
   7747 		 * Add a new receive buffer to the ring, unless of
   7748 		 * course the length is zero. Treat the latter as a
   7749 		 * failed mapping.
   7750 		 */
   7751 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7752 			/*
   7753 			 * Failed, throw away what we've done so
   7754 			 * far, and discard the rest of the packet.
   7755 			 */
   7756 			ifp->if_ierrors++;
   7757 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7758 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7759 			wm_init_rxdesc(rxq, i);
   7760 			if (!wm_rxdesc_is_eop(rxq, status))
   7761 				rxq->rxq_discard = 1;
   7762 			if (rxq->rxq_head != NULL)
   7763 				m_freem(rxq->rxq_head);
   7764 			WM_RXCHAIN_RESET(rxq);
   7765 			DPRINTF(WM_DEBUG_RX,
   7766 			    ("%s: RX: Rx buffer allocation failed, "
   7767 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7768 			    rxq->rxq_discard ? " (discard)" : ""));
   7769 			continue;
   7770 		}
   7771 
   7772 		m->m_len = len;
   7773 		rxq->rxq_len += len;
   7774 		DPRINTF(WM_DEBUG_RX,
   7775 		    ("%s: RX: buffer at %p len %d\n",
   7776 		    device_xname(sc->sc_dev), m->m_data, len));
   7777 
   7778 		/* If this is not the end of the packet, keep looking. */
   7779 		if (!wm_rxdesc_is_eop(rxq, status)) {
   7780 			WM_RXCHAIN_LINK(rxq, m);
   7781 			DPRINTF(WM_DEBUG_RX,
   7782 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7783 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7784 			continue;
   7785 		}
   7786 
   7787 		/*
   7788 		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on I350, I354
		 * and I21[01] (not all chips can be configured to strip
		 * it), so we need to trim it.  We may also need to
		 * adjust the length of the previous mbuf in the chain
		 * if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on I350, so we don't trim the
		 * FCS there.
   7796 		 */
   7797 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7798 		    && (sc->sc_type != WM_T_I210)
   7799 		    && (sc->sc_type != WM_T_I211)) {
   7800 			if (m->m_len < ETHER_CRC_LEN) {
   7801 				rxq->rxq_tail->m_len
   7802 				    -= (ETHER_CRC_LEN - m->m_len);
   7803 				m->m_len = 0;
   7804 			} else
   7805 				m->m_len -= ETHER_CRC_LEN;
   7806 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7807 		} else
   7808 			len = rxq->rxq_len;
   7809 
   7810 		WM_RXCHAIN_LINK(rxq, m);
   7811 
   7812 		*rxq->rxq_tailp = NULL;
   7813 		m = rxq->rxq_head;
   7814 
   7815 		WM_RXCHAIN_RESET(rxq);
   7816 
   7817 		DPRINTF(WM_DEBUG_RX,
   7818 		    ("%s: RX: have entire packet, len -> %d\n",
   7819 		    device_xname(sc->sc_dev), len));
   7820 
   7821 		/* If an error occurred, update stats and drop the packet. */
   7822 		if (wm_rxdesc_has_errors(rxq, errors)) {
   7823 			m_freem(m);
   7824 			continue;
   7825 		}
   7826 
   7827 		/* No errors.  Receive the packet. */
   7828 		m_set_rcvif(m, ifp);
   7829 		m->m_pkthdr.len = len;
   7830 		/*
   7831 		 * TODO
		 * We should save the rsshash and rsstype in this mbuf.
   7833 		 */
   7834 		DPRINTF(WM_DEBUG_RX,
   7835 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   7836 			device_xname(sc->sc_dev), rsstype, rsshash));
   7837 
   7838 		/*
   7839 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7840 		 * for us.  Associate the tag with the packet.
   7841 		 */
   7842 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   7843 			continue;
   7844 
   7845 		/* Set up checksum info for this packet. */
   7846 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   7847 
   7848 		mutex_exit(rxq->rxq_lock);
   7849 
   7850 		/* Pass it on. */
   7851 		if_percpuq_enqueue(sc->sc_ipq, m);
   7852 
   7853 		mutex_enter(rxq->rxq_lock);
   7854 
   7855 		if (rxq->rxq_stopping)
   7856 			break;
   7857 	}
   7858 
   7859 	/* Update the receive pointer. */
   7860 	rxq->rxq_ptr = i;
   7861 	if (count != 0)
   7862 		rnd_add_uint32(&sc->rnd_source, count);
   7863 
   7864 	DPRINTF(WM_DEBUG_RX,
   7865 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7866 }
   7867 
   7868 /*
   7869  * wm_linkintr_gmii:
   7870  *
   7871  *	Helper; handle link interrupts for GMII.
   7872  */
   7873 static void
   7874 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7875 {
   7876 
   7877 	KASSERT(WM_CORE_LOCKED(sc));
   7878 
   7879 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7880 		__func__));
   7881 
   7882 	if (icr & ICR_LSC) {
   7883 		uint32_t reg;
   7884 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7885 
   7886 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7887 			wm_gig_downshift_workaround_ich8lan(sc);
   7888 
   7889 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7890 			device_xname(sc->sc_dev)));
   7891 		mii_pollstat(&sc->sc_mii);
   7892 		if (sc->sc_type == WM_T_82543) {
   7893 			int miistatus, active;
   7894 
   7895 			/*
			 * With 82543, we need to force the MAC's speed
			 * and duplex to match the PHY's speed and
			 * duplex configuration.
   7899 			 */
   7900 			miistatus = sc->sc_mii.mii_media_status;
   7901 
   7902 			if (miistatus & IFM_ACTIVE) {
   7903 				active = sc->sc_mii.mii_media_active;
   7904 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7905 				switch (IFM_SUBTYPE(active)) {
   7906 				case IFM_10_T:
   7907 					sc->sc_ctrl |= CTRL_SPEED_10;
   7908 					break;
   7909 				case IFM_100_TX:
   7910 					sc->sc_ctrl |= CTRL_SPEED_100;
   7911 					break;
   7912 				case IFM_1000_T:
   7913 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7914 					break;
   7915 				default:
   7916 					/*
   7917 					 * fiber?
7918 					 * Should not enter here.
   7919 					 */
   7920 					printf("unknown media (%x)\n", active);
   7921 					break;
   7922 				}
   7923 				if (active & IFM_FDX)
   7924 					sc->sc_ctrl |= CTRL_FD;
   7925 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7926 			}
   7927 		} else if ((sc->sc_type == WM_T_ICH8)
   7928 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7929 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7930 		} else if (sc->sc_type == WM_T_PCH) {
   7931 			wm_k1_gig_workaround_hv(sc,
   7932 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7933 		}
   7934 
   7935 		if ((sc->sc_phytype == WMPHY_82578)
   7936 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7937 			== IFM_1000_T)) {
   7938 
   7939 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7940 				delay(200*1000); /* XXX too big */
   7941 
   7942 				/* Link stall fix for link up */
   7943 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7944 				    HV_MUX_DATA_CTRL,
   7945 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7946 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7947 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7948 				    HV_MUX_DATA_CTRL,
   7949 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7950 			}
   7951 		}
   7952 		/*
   7953 		 * I217 Packet Loss issue:
7954 		 * Ensure that the FEXTNVM4 beacon duration is set
7955 		 * correctly on power-up.
7956 		 * Set the beacon duration for the I217 to 8 usec.
   7957 		 */
   7958 		if ((sc->sc_type == WM_T_PCH_LPT)
   7959 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7960 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   7961 			reg &= ~FEXTNVM4_BEACON_DURATION;
   7962 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   7963 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   7964 		}
   7965 
   7966 		/* XXX Work-around I218 hang issue */
   7967 		/* e1000_k1_workaround_lpt_lp() */
   7968 
   7969 		if ((sc->sc_type == WM_T_PCH_LPT)
   7970 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7971 			/*
   7972 			 * Set platform power management values for Latency
   7973 			 * Tolerance Reporting (LTR)
   7974 			 */
   7975 			wm_platform_pm_pch_lpt(sc,
   7976 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   7977 				    != 0));
   7978 		}
   7979 
   7980 		/* FEXTNVM6 K1-off workaround */
   7981 		if (sc->sc_type == WM_T_PCH_SPT) {
   7982 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   7983 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   7984 			    & FEXTNVM6_K1_OFF_ENABLE)
   7985 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   7986 			else
   7987 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   7988 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   7989 		}
   7990 	} else if (icr & ICR_RXSEQ) {
   7991 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7992 			device_xname(sc->sc_dev)));
   7993 	}
   7994 }
   7995 
   7996 /*
   7997  * wm_linkintr_tbi:
   7998  *
   7999  *	Helper; handle link interrupts for TBI mode.
   8000  */
   8001 static void
   8002 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8003 {
   8004 	uint32_t status;
   8005 
   8006 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8007 		__func__));
   8008 
   8009 	status = CSR_READ(sc, WMREG_STATUS);
   8010 	if (icr & ICR_LSC) {
   8011 		if (status & STATUS_LU) {
   8012 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8013 			    device_xname(sc->sc_dev),
   8014 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8015 			/*
   8016 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8017 			 * so we should update sc->sc_ctrl
   8018 			 */
   8019 
   8020 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8021 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8022 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8023 			if (status & STATUS_FD)
   8024 				sc->sc_tctl |=
   8025 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8026 			else
   8027 				sc->sc_tctl |=
   8028 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8029 			if (sc->sc_ctrl & CTRL_TFCE)
   8030 				sc->sc_fcrtl |= FCRTL_XONE;
   8031 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8032 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8033 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8034 				      sc->sc_fcrtl);
   8035 			sc->sc_tbi_linkup = 1;
   8036 		} else {
   8037 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8038 			    device_xname(sc->sc_dev)));
   8039 			sc->sc_tbi_linkup = 0;
   8040 		}
   8041 		/* Update LED */
   8042 		wm_tbi_serdes_set_linkled(sc);
   8043 	} else if (icr & ICR_RXSEQ) {
   8044 		DPRINTF(WM_DEBUG_LINK,
   8045 		    ("%s: LINK: Receive sequence error\n",
   8046 		    device_xname(sc->sc_dev)));
   8047 	}
   8048 }
   8049 
   8050 /*
   8051  * wm_linkintr_serdes:
   8052  *
8053  *	Helper; handle link interrupts for SERDES mode.
   8054  */
   8055 static void
   8056 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8057 {
   8058 	struct mii_data *mii = &sc->sc_mii;
   8059 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8060 	uint32_t pcs_adv, pcs_lpab, reg;
   8061 
   8062 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8063 		__func__));
   8064 
   8065 	if (icr & ICR_LSC) {
   8066 		/* Check PCS */
   8067 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8068 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8069 			mii->mii_media_status |= IFM_ACTIVE;
   8070 			sc->sc_tbi_linkup = 1;
   8071 		} else {
   8072 			mii->mii_media_status |= IFM_NONE;
   8073 			sc->sc_tbi_linkup = 0;
   8074 			wm_tbi_serdes_set_linkled(sc);
   8075 			return;
   8076 		}
   8077 		mii->mii_media_active |= IFM_1000_SX;
   8078 		if ((reg & PCS_LSTS_FDX) != 0)
   8079 			mii->mii_media_active |= IFM_FDX;
   8080 		else
   8081 			mii->mii_media_active |= IFM_HDX;
   8082 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8083 			/* Check flow */
   8084 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8085 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8086 				DPRINTF(WM_DEBUG_LINK,
   8087 				    ("XXX LINKOK but not ACOMP\n"));
   8088 				return;
   8089 			}
   8090 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8091 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8092 			DPRINTF(WM_DEBUG_LINK,
   8093 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
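			/*
			 * Resolve flow control from the advertised and
			 * link partner pause bits, as in IEEE 802.3 annex
			 * 28B: symmetric pause on both sides enables TX
			 * and RX pause; otherwise a matching asymmetric
			 * combination enables TX-only or RX-only pause.
			 */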
   8094 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8095 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8096 				mii->mii_media_active |= IFM_FLOW
   8097 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8098 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8099 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8100 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8101 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8102 				mii->mii_media_active |= IFM_FLOW
   8103 				    | IFM_ETH_TXPAUSE;
   8104 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8105 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8106 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8107 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8108 				mii->mii_media_active |= IFM_FLOW
   8109 				    | IFM_ETH_RXPAUSE;
   8110 		}
   8111 		/* Update LED */
   8112 		wm_tbi_serdes_set_linkled(sc);
   8113 	} else {
   8114 		DPRINTF(WM_DEBUG_LINK,
   8115 		    ("%s: LINK: Receive sequence error\n",
   8116 		    device_xname(sc->sc_dev)));
   8117 	}
   8118 }
   8119 
   8120 /*
   8121  * wm_linkintr:
   8122  *
   8123  *	Helper; handle link interrupts.
   8124  */
   8125 static void
   8126 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8127 {
   8128 
   8129 	KASSERT(WM_CORE_LOCKED(sc));
   8130 
   8131 	if (sc->sc_flags & WM_F_HAS_MII)
   8132 		wm_linkintr_gmii(sc, icr);
   8133 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8134 	    && (sc->sc_type >= WM_T_82575))
   8135 		wm_linkintr_serdes(sc, icr);
   8136 	else
   8137 		wm_linkintr_tbi(sc, icr);
   8138 }
   8139 
   8140 /*
   8141  * wm_intr_legacy:
   8142  *
   8143  *	Interrupt service routine for INTx and MSI.
   8144  */
   8145 static int
   8146 wm_intr_legacy(void *arg)
   8147 {
   8148 	struct wm_softc *sc = arg;
   8149 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8150 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   8151 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8152 	uint32_t icr, rndval = 0;
   8153 	int handled = 0;
   8154 
   8155 	DPRINTF(WM_DEBUG_TX,
   8156 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
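	/*
	 * Reading ICR acknowledges (clears) the pending interrupt
	 * causes, so keep looping until no cause we care about
	 * remains asserted.
	 */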
   8157 	while (1 /* CONSTCOND */) {
   8158 		icr = CSR_READ(sc, WMREG_ICR);
   8159 		if ((icr & sc->sc_icr) == 0)
   8160 			break;
   8161 		if (rndval == 0)
   8162 			rndval = icr;
   8163 
   8164 		mutex_enter(rxq->rxq_lock);
   8165 
   8166 		if (rxq->rxq_stopping) {
   8167 			mutex_exit(rxq->rxq_lock);
   8168 			break;
   8169 		}
   8170 
   8171 		handled = 1;
   8172 
   8173 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8174 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8175 			DPRINTF(WM_DEBUG_RX,
   8176 			    ("%s: RX: got Rx intr 0x%08x\n",
   8177 			    device_xname(sc->sc_dev),
   8178 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8179 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8180 		}
   8181 #endif
   8182 		wm_rxeof(rxq);
   8183 
   8184 		mutex_exit(rxq->rxq_lock);
   8185 		mutex_enter(txq->txq_lock);
   8186 
   8187 		if (txq->txq_stopping) {
   8188 			mutex_exit(txq->txq_lock);
   8189 			break;
   8190 		}
   8191 
   8192 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8193 		if (icr & ICR_TXDW) {
   8194 			DPRINTF(WM_DEBUG_TX,
   8195 			    ("%s: TX: got TXDW interrupt\n",
   8196 			    device_xname(sc->sc_dev)));
   8197 			WM_Q_EVCNT_INCR(txq, txdw);
   8198 		}
   8199 #endif
   8200 		wm_txeof(sc, txq);
   8201 
   8202 		mutex_exit(txq->txq_lock);
   8203 		WM_CORE_LOCK(sc);
   8204 
   8205 		if (sc->sc_core_stopping) {
   8206 			WM_CORE_UNLOCK(sc);
   8207 			break;
   8208 		}
   8209 
   8210 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8211 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8212 			wm_linkintr(sc, icr);
   8213 		}
   8214 
   8215 		WM_CORE_UNLOCK(sc);
   8216 
   8217 		if (icr & ICR_RXO) {
   8218 #if defined(WM_DEBUG)
   8219 			log(LOG_WARNING, "%s: Receive overrun\n",
   8220 			    device_xname(sc->sc_dev));
   8221 #endif /* defined(WM_DEBUG) */
   8222 		}
   8223 	}
   8224 
   8225 	rnd_add_uint32(&sc->rnd_source, rndval);
   8226 
   8227 	if (handled) {
   8228 		/* Try to get more packets going. */
   8229 		if_schedule_deferred_start(ifp);
   8230 	}
   8231 
   8232 	return handled;
   8233 }
   8234 
   8235 static int
   8236 wm_txrxintr_msix(void *arg)
   8237 {
   8238 	struct wm_queue *wmq = arg;
   8239 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8240 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8241 	struct wm_softc *sc = txq->txq_sc;
   8242 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8243 
   8244 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8245 
   8246 	DPRINTF(WM_DEBUG_TX,
   8247 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8248 
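	/*
	 * Mask this queue's Tx/Rx interrupts while they're being
	 * serviced so the handler isn't re-entered; they're re-enabled
	 * via IMS/EIMS at the end of this function.
	 */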
   8249 	if (sc->sc_type == WM_T_82574)
   8250 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8251 	else if (sc->sc_type == WM_T_82575)
   8252 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8253 	else
   8254 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8255 
   8256 	mutex_enter(txq->txq_lock);
   8257 
   8258 	if (txq->txq_stopping) {
   8259 		mutex_exit(txq->txq_lock);
   8260 		return 0;
   8261 	}
   8262 
   8263 	WM_Q_EVCNT_INCR(txq, txdw);
   8264 	wm_txeof(sc, txq);
   8265 
   8266 	/* Try to get more packets going. */
   8267 	if (pcq_peek(txq->txq_interq) != NULL)
   8268 		if_schedule_deferred_start(ifp);
   8269 	/*
8270 	 * There is still some upper-layer processing that calls
8271 	 * ifp->if_start() directly, e.g. ALTQ
   8272 	 */
   8273 	if (wmq->wmq_id == 0)
   8274 		if_schedule_deferred_start(ifp);
   8275 
   8276 	mutex_exit(txq->txq_lock);
   8277 
   8278 	DPRINTF(WM_DEBUG_RX,
   8279 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8280 	mutex_enter(rxq->rxq_lock);
   8281 
   8282 	if (rxq->rxq_stopping) {
   8283 		mutex_exit(rxq->rxq_lock);
   8284 		return 0;
   8285 	}
   8286 
   8287 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8288 	wm_rxeof(rxq);
   8289 	mutex_exit(rxq->rxq_lock);
   8290 
   8291 	if (sc->sc_type == WM_T_82574)
   8292 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8293 	else if (sc->sc_type == WM_T_82575)
   8294 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8295 	else
   8296 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8297 
   8298 	return 1;
   8299 }
   8300 
   8301 /*
   8302  * wm_linkintr_msix:
   8303  *
   8304  *	Interrupt service routine for link status change for MSI-X.
   8305  */
   8306 static int
   8307 wm_linkintr_msix(void *arg)
   8308 {
   8309 	struct wm_softc *sc = arg;
   8310 	uint32_t reg;
   8311 
   8312 	DPRINTF(WM_DEBUG_LINK,
   8313 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8314 
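	/* Reading ICR clears the pending causes, including LSC. */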
   8315 	reg = CSR_READ(sc, WMREG_ICR);
   8316 	WM_CORE_LOCK(sc);
   8317 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8318 		goto out;
   8319 
   8320 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8321 	wm_linkintr(sc, ICR_LSC);
   8322 
   8323 out:
   8324 	WM_CORE_UNLOCK(sc);
   8325 
   8326 	if (sc->sc_type == WM_T_82574)
   8327 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8328 	else if (sc->sc_type == WM_T_82575)
   8329 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8330 	else
   8331 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8332 
   8333 	return 1;
   8334 }
   8335 
   8336 /*
   8337  * Media related.
   8338  * GMII, SGMII, TBI (and SERDES)
   8339  */
   8340 
   8341 /* Common */
   8342 
   8343 /*
   8344  * wm_tbi_serdes_set_linkled:
   8345  *
   8346  *	Update the link LED on TBI and SERDES devices.
   8347  */
   8348 static void
   8349 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8350 {
   8351 
   8352 	if (sc->sc_tbi_linkup)
   8353 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8354 	else
   8355 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8356 
   8357 	/* 82540 or newer devices are active low */
   8358 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8359 
   8360 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8361 }
   8362 
   8363 /* GMII related */
   8364 
   8365 /*
   8366  * wm_gmii_reset:
   8367  *
   8368  *	Reset the PHY.
   8369  */
   8370 static void
   8371 wm_gmii_reset(struct wm_softc *sc)
   8372 {
   8373 	uint32_t reg;
   8374 	int rv;
   8375 
   8376 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8377 		device_xname(sc->sc_dev), __func__));
   8378 
   8379 	rv = sc->phy.acquire(sc);
   8380 	if (rv != 0) {
   8381 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8382 		    __func__);
   8383 		return;
   8384 	}
   8385 
   8386 	switch (sc->sc_type) {
   8387 	case WM_T_82542_2_0:
   8388 	case WM_T_82542_2_1:
   8389 		/* null */
   8390 		break;
   8391 	case WM_T_82543:
   8392 		/*
8393 		 * With the 82543, we need to force the MAC's speed and duplex
8394 		 * to match the PHY's speed and duplex configuration.
   8395 		 * In addition, we need to perform a hardware reset on the PHY
   8396 		 * to take it out of reset.
   8397 		 */
   8398 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8399 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8400 
   8401 		/* The PHY reset pin is active-low. */
   8402 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8403 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8404 		    CTRL_EXT_SWDPIN(4));
   8405 		reg |= CTRL_EXT_SWDPIO(4);
   8406 
   8407 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8408 		CSR_WRITE_FLUSH(sc);
   8409 		delay(10*1000);
   8410 
   8411 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8412 		CSR_WRITE_FLUSH(sc);
   8413 		delay(150);
   8414 #if 0
   8415 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8416 #endif
   8417 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8418 		break;
   8419 	case WM_T_82544:	/* reset 10000us */
   8420 	case WM_T_82540:
   8421 	case WM_T_82545:
   8422 	case WM_T_82545_3:
   8423 	case WM_T_82546:
   8424 	case WM_T_82546_3:
   8425 	case WM_T_82541:
   8426 	case WM_T_82541_2:
   8427 	case WM_T_82547:
   8428 	case WM_T_82547_2:
   8429 	case WM_T_82571:	/* reset 100us */
   8430 	case WM_T_82572:
   8431 	case WM_T_82573:
   8432 	case WM_T_82574:
   8433 	case WM_T_82575:
   8434 	case WM_T_82576:
   8435 	case WM_T_82580:
   8436 	case WM_T_I350:
   8437 	case WM_T_I354:
   8438 	case WM_T_I210:
   8439 	case WM_T_I211:
   8440 	case WM_T_82583:
   8441 	case WM_T_80003:
   8442 		/* generic reset */
   8443 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8444 		CSR_WRITE_FLUSH(sc);
   8445 		delay(20000);
   8446 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8447 		CSR_WRITE_FLUSH(sc);
   8448 		delay(20000);
   8449 
   8450 		if ((sc->sc_type == WM_T_82541)
   8451 		    || (sc->sc_type == WM_T_82541_2)
   8452 		    || (sc->sc_type == WM_T_82547)
   8453 		    || (sc->sc_type == WM_T_82547_2)) {
8454 			/* Workarounds for IGP PHYs are done in igp_reset() */
   8455 			/* XXX add code to set LED after phy reset */
   8456 		}
   8457 		break;
   8458 	case WM_T_ICH8:
   8459 	case WM_T_ICH9:
   8460 	case WM_T_ICH10:
   8461 	case WM_T_PCH:
   8462 	case WM_T_PCH2:
   8463 	case WM_T_PCH_LPT:
   8464 	case WM_T_PCH_SPT:
   8465 		/* generic reset */
   8466 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8467 		CSR_WRITE_FLUSH(sc);
   8468 		delay(100);
   8469 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8470 		CSR_WRITE_FLUSH(sc);
   8471 		delay(150);
   8472 		break;
   8473 	default:
   8474 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8475 		    __func__);
   8476 		break;
   8477 	}
   8478 
   8479 	sc->phy.release(sc);
   8480 
   8481 	/* get_cfg_done */
   8482 	wm_get_cfg_done(sc);
   8483 
   8484 	/* extra setup */
   8485 	switch (sc->sc_type) {
   8486 	case WM_T_82542_2_0:
   8487 	case WM_T_82542_2_1:
   8488 	case WM_T_82543:
   8489 	case WM_T_82544:
   8490 	case WM_T_82540:
   8491 	case WM_T_82545:
   8492 	case WM_T_82545_3:
   8493 	case WM_T_82546:
   8494 	case WM_T_82546_3:
   8495 	case WM_T_82541_2:
   8496 	case WM_T_82547_2:
   8497 	case WM_T_82571:
   8498 	case WM_T_82572:
   8499 	case WM_T_82573:
   8500 	case WM_T_82575:
   8501 	case WM_T_82576:
   8502 	case WM_T_82580:
   8503 	case WM_T_I350:
   8504 	case WM_T_I354:
   8505 	case WM_T_I210:
   8506 	case WM_T_I211:
   8507 	case WM_T_80003:
   8508 		/* null */
   8509 		break;
   8510 	case WM_T_82574:
   8511 	case WM_T_82583:
   8512 		wm_lplu_d0_disable(sc);
   8513 		break;
   8514 	case WM_T_82541:
   8515 	case WM_T_82547:
8516 		/* XXX Actively configure the LED after PHY reset */
   8517 		break;
   8518 	case WM_T_ICH8:
   8519 	case WM_T_ICH9:
   8520 	case WM_T_ICH10:
   8521 	case WM_T_PCH:
   8522 	case WM_T_PCH2:
   8523 	case WM_T_PCH_LPT:
   8524 	case WM_T_PCH_SPT:
8525 		/* Allow time for h/w to get to a quiescent state after reset */
   8526 		delay(10*1000);
   8527 
   8528 		if (sc->sc_type == WM_T_PCH)
   8529 			wm_hv_phy_workaround_ich8lan(sc);
   8530 
   8531 		if (sc->sc_type == WM_T_PCH2)
   8532 			wm_lv_phy_workaround_ich8lan(sc);
   8533 
8534 		/* Clear the host wakeup bit after LCD reset */
   8535 		if (sc->sc_type >= WM_T_PCH) {
   8536 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8537 			    BM_PORT_GEN_CFG);
   8538 			reg &= ~BM_WUC_HOST_WU_BIT;
   8539 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8540 			    BM_PORT_GEN_CFG, reg);
   8541 		}
   8542 
   8543 		/*
8544 		 * XXX Configure the LCD with the extended configuration region
   8545 		 * in NVM
   8546 		 */
   8547 
   8548 		/* Disable D0 LPLU. */
   8549 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8550 			wm_lplu_d0_disable_pch(sc);
   8551 		else
   8552 			wm_lplu_d0_disable(sc);	/* ICH* */
   8553 		break;
   8554 	default:
   8555 		panic("%s: unknown type\n", __func__);
   8556 		break;
   8557 	}
   8558 }
   8559 
   8560 /*
   8561  * Setup sc_phytype and mii_{read|write}reg.
   8562  *
8563  *  To identify the PHY type, the correct read/write function must be
8564  * selected, and to select the correct read/write function only the
8565  * PCI ID or MAC type is available, without accessing PHY registers.
8566  *
8567  *  On the first call of this function, the PHY ID is not known yet, so
8568  * the PCI ID or MAC type is checked. The list of PCI IDs may not be
8569  * complete, so the result might be incorrect.
8570  *
8571  *  On the second call, the PHY OUI and model are used to identify the
8572  * PHY type. The result might still not be perfect because some entries
8573  * are missing from the comparison, but it is better than the first call.
8574  *
8575  *  If the newly detected result differs from the previous assumption,
8576  * a diagnostic message is printed.
   8577  */
   8578 static void
   8579 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8580     uint16_t phy_model)
   8581 {
   8582 	device_t dev = sc->sc_dev;
   8583 	struct mii_data *mii = &sc->sc_mii;
   8584 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8585 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8586 	mii_readreg_t new_readreg;
   8587 	mii_writereg_t new_writereg;
   8588 
   8589 	if (mii->mii_readreg == NULL) {
   8590 		/*
   8591 		 *  This is the first call of this function. For ICH and PCH
   8592 		 * variants, it's difficult to determine the PHY access method
   8593 		 * by sc_type, so use the PCI product ID for some devices.
   8594 		 */
   8595 
   8596 		switch (sc->sc_pcidevid) {
   8597 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8598 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8599 			/* 82577 */
   8600 			new_phytype = WMPHY_82577;
   8601 			break;
   8602 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8603 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8604 			/* 82578 */
   8605 			new_phytype = WMPHY_82578;
   8606 			break;
   8607 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8608 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8609 			/* 82579 */
   8610 			new_phytype = WMPHY_82579;
   8611 			break;
   8612 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8613 		case PCI_PRODUCT_INTEL_82801I_BM:
   8614 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8615 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8616 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8617 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8618 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8619 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8620 			/* ICH8, 9, 10 with 82567 */
   8621 			new_phytype = WMPHY_BM;
   8622 			break;
   8623 		default:
   8624 			break;
   8625 		}
   8626 	} else {
   8627 		/* It's not the first call. Use PHY OUI and model */
   8628 		switch (phy_oui) {
   8629 		case MII_OUI_ATHEROS: /* XXX ??? */
   8630 			switch (phy_model) {
   8631 			case 0x0004: /* XXX */
   8632 				new_phytype = WMPHY_82578;
   8633 				break;
   8634 			default:
   8635 				break;
   8636 			}
   8637 			break;
   8638 		case MII_OUI_xxMARVELL:
   8639 			switch (phy_model) {
   8640 			case MII_MODEL_xxMARVELL_I210:
   8641 				new_phytype = WMPHY_I210;
   8642 				break;
   8643 			case MII_MODEL_xxMARVELL_E1011:
   8644 			case MII_MODEL_xxMARVELL_E1000_3:
   8645 			case MII_MODEL_xxMARVELL_E1000_5:
   8646 			case MII_MODEL_xxMARVELL_E1112:
   8647 				new_phytype = WMPHY_M88;
   8648 				break;
   8649 			case MII_MODEL_xxMARVELL_E1149:
   8650 				new_phytype = WMPHY_BM;
   8651 				break;
   8652 			case MII_MODEL_xxMARVELL_E1111:
   8653 			case MII_MODEL_xxMARVELL_I347:
   8654 			case MII_MODEL_xxMARVELL_E1512:
   8655 			case MII_MODEL_xxMARVELL_E1340M:
   8656 			case MII_MODEL_xxMARVELL_E1543:
   8657 				new_phytype = WMPHY_M88;
   8658 				break;
   8659 			case MII_MODEL_xxMARVELL_I82563:
   8660 				new_phytype = WMPHY_GG82563;
   8661 				break;
   8662 			default:
   8663 				break;
   8664 			}
   8665 			break;
   8666 		case MII_OUI_INTEL:
   8667 			switch (phy_model) {
   8668 			case MII_MODEL_INTEL_I82577:
   8669 				new_phytype = WMPHY_82577;
   8670 				break;
   8671 			case MII_MODEL_INTEL_I82579:
   8672 				new_phytype = WMPHY_82579;
   8673 				break;
   8674 			case MII_MODEL_INTEL_I217:
   8675 				new_phytype = WMPHY_I217;
   8676 				break;
   8677 			case MII_MODEL_INTEL_I82580:
   8678 			case MII_MODEL_INTEL_I350:
   8679 				new_phytype = WMPHY_82580;
   8680 				break;
   8681 			default:
   8682 				break;
   8683 			}
   8684 			break;
   8685 		case MII_OUI_yyINTEL:
   8686 			switch (phy_model) {
   8687 			case MII_MODEL_yyINTEL_I82562G:
   8688 			case MII_MODEL_yyINTEL_I82562EM:
   8689 			case MII_MODEL_yyINTEL_I82562ET:
   8690 				new_phytype = WMPHY_IFE;
   8691 				break;
   8692 			case MII_MODEL_yyINTEL_IGP01E1000:
   8693 				new_phytype = WMPHY_IGP;
   8694 				break;
   8695 			case MII_MODEL_yyINTEL_I82566:
   8696 				new_phytype = WMPHY_IGP_3;
   8697 				break;
   8698 			default:
   8699 				break;
   8700 			}
   8701 			break;
   8702 		default:
   8703 			break;
   8704 		}
   8705 		if (new_phytype == WMPHY_UNKNOWN)
   8706 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   8707 			    __func__);
   8708 
   8709 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
8710 		    && (sc->sc_phytype != new_phytype)) {
8711 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
8712 			    "was incorrect. PHY type from PHY ID = %u\n",
   8713 			    sc->sc_phytype, new_phytype);
   8714 		}
   8715 	}
   8716 
   8717 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   8718 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   8719 		/* SGMII */
   8720 		new_readreg = wm_sgmii_readreg;
   8721 		new_writereg = wm_sgmii_writereg;
   8722 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   8723 		/* BM2 (phyaddr == 1) */
   8724 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   8725 		    && (new_phytype != WMPHY_BM)
   8726 		    && (new_phytype != WMPHY_UNKNOWN))
   8727 			doubt_phytype = new_phytype;
   8728 		new_phytype = WMPHY_BM;
   8729 		new_readreg = wm_gmii_bm_readreg;
   8730 		new_writereg = wm_gmii_bm_writereg;
   8731 	} else if (sc->sc_type >= WM_T_PCH) {
   8732 		/* All PCH* use _hv_ */
   8733 		new_readreg = wm_gmii_hv_readreg;
   8734 		new_writereg = wm_gmii_hv_writereg;
   8735 	} else if (sc->sc_type >= WM_T_ICH8) {
   8736 		/* non-82567 ICH8, 9 and 10 */
   8737 		new_readreg = wm_gmii_i82544_readreg;
   8738 		new_writereg = wm_gmii_i82544_writereg;
   8739 	} else if (sc->sc_type >= WM_T_80003) {
   8740 		/* 80003 */
   8741 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   8742 		    && (new_phytype != WMPHY_GG82563)
   8743 		    && (new_phytype != WMPHY_UNKNOWN))
   8744 			doubt_phytype = new_phytype;
   8745 		new_phytype = WMPHY_GG82563;
   8746 		new_readreg = wm_gmii_i80003_readreg;
   8747 		new_writereg = wm_gmii_i80003_writereg;
   8748 	} else if (sc->sc_type >= WM_T_I210) {
   8749 		/* I210 and I211 */
   8750 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   8751 		    && (new_phytype != WMPHY_I210)
   8752 		    && (new_phytype != WMPHY_UNKNOWN))
   8753 			doubt_phytype = new_phytype;
   8754 		new_phytype = WMPHY_I210;
   8755 		new_readreg = wm_gmii_gs40g_readreg;
   8756 		new_writereg = wm_gmii_gs40g_writereg;
   8757 	} else if (sc->sc_type >= WM_T_82580) {
   8758 		/* 82580, I350 and I354 */
   8759 		new_readreg = wm_gmii_82580_readreg;
   8760 		new_writereg = wm_gmii_82580_writereg;
   8761 	} else if (sc->sc_type >= WM_T_82544) {
8762 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8763 		new_readreg = wm_gmii_i82544_readreg;
   8764 		new_writereg = wm_gmii_i82544_writereg;
   8765 	} else {
   8766 		new_readreg = wm_gmii_i82543_readreg;
   8767 		new_writereg = wm_gmii_i82543_writereg;
   8768 	}
   8769 
   8770 	if (new_phytype == WMPHY_BM) {
   8771 		/* All BM use _bm_ */
   8772 		new_readreg = wm_gmii_bm_readreg;
   8773 		new_writereg = wm_gmii_bm_writereg;
   8774 	}
   8775 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8776 		/* All PCH* use _hv_ */
   8777 		new_readreg = wm_gmii_hv_readreg;
   8778 		new_writereg = wm_gmii_hv_writereg;
   8779 	}
   8780 
   8781 	/* Diag output */
   8782 	if (doubt_phytype != WMPHY_UNKNOWN)
   8783 		aprint_error_dev(dev, "Assumed new PHY type was "
   8784 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   8785 		    new_phytype);
   8786 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
8787 	    && (sc->sc_phytype != new_phytype))
8788 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
8789 		    "was incorrect. New PHY type = %u\n",
   8790 		    sc->sc_phytype, new_phytype);
   8791 
   8792 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   8793 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   8794 
   8795 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   8796 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   8797 		    "function was incorrect.\n");
   8798 
   8799 	/* Update now */
   8800 	sc->sc_phytype = new_phytype;
   8801 	mii->mii_readreg = new_readreg;
   8802 	mii->mii_writereg = new_writereg;
   8803 }
   8804 
   8805 /*
   8806  * wm_get_phy_id_82575:
   8807  *
   8808  * Return PHY ID. Return -1 if it failed.
   8809  */
   8810 static int
   8811 wm_get_phy_id_82575(struct wm_softc *sc)
   8812 {
   8813 	uint32_t reg;
   8814 	int phyid = -1;
   8815 
   8816 	/* XXX */
   8817 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8818 		return -1;
   8819 
   8820 	if (wm_sgmii_uses_mdio(sc)) {
   8821 		switch (sc->sc_type) {
   8822 		case WM_T_82575:
   8823 		case WM_T_82576:
   8824 			reg = CSR_READ(sc, WMREG_MDIC);
   8825 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8826 			break;
   8827 		case WM_T_82580:
   8828 		case WM_T_I350:
   8829 		case WM_T_I354:
   8830 		case WM_T_I210:
   8831 		case WM_T_I211:
   8832 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8833 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8834 			break;
   8835 		default:
   8836 			return -1;
   8837 		}
   8838 	}
   8839 
   8840 	return phyid;
   8841 }
   8842 
   8843 
   8844 /*
   8845  * wm_gmii_mediainit:
   8846  *
   8847  *	Initialize media for use on 1000BASE-T devices.
   8848  */
   8849 static void
   8850 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8851 {
   8852 	device_t dev = sc->sc_dev;
   8853 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8854 	struct mii_data *mii = &sc->sc_mii;
   8855 	uint32_t reg;
   8856 
   8857 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8858 		device_xname(sc->sc_dev), __func__));
   8859 
   8860 	/* We have GMII. */
   8861 	sc->sc_flags |= WM_F_HAS_MII;
   8862 
   8863 	if (sc->sc_type == WM_T_80003)
   8864 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8865 	else
   8866 		sc->sc_tipg = TIPG_1000T_DFLT;
   8867 
   8868 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8869 	if ((sc->sc_type == WM_T_82580)
   8870 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8871 	    || (sc->sc_type == WM_T_I211)) {
   8872 		reg = CSR_READ(sc, WMREG_PHPM);
   8873 		reg &= ~PHPM_GO_LINK_D;
   8874 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8875 	}
   8876 
   8877 	/*
   8878 	 * Let the chip set speed/duplex on its own based on
   8879 	 * signals from the PHY.
   8880 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8881 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8882 	 */
   8883 	sc->sc_ctrl |= CTRL_SLU;
   8884 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8885 
   8886 	/* Initialize our media structures and probe the GMII. */
   8887 	mii->mii_ifp = ifp;
   8888 
   8889 	/*
8890 	 * The first call of wm_gmii_setup_phytype. The result might be
   8891 	 * incorrect.
   8892 	 */
   8893 	wm_gmii_setup_phytype(sc, 0, 0);
   8894 
   8895 	mii->mii_statchg = wm_gmii_statchg;
   8896 
   8897 	/* get PHY control from SMBus to PCIe */
   8898 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   8899 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   8900 		wm_smbustopci(sc);
   8901 
   8902 	wm_gmii_reset(sc);
   8903 
   8904 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8905 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8906 	    wm_gmii_mediastatus);
   8907 
   8908 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8909 	    || (sc->sc_type == WM_T_82580)
   8910 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8911 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8912 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8913 			/* Attach only one port */
   8914 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8915 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8916 		} else {
   8917 			int i, id;
   8918 			uint32_t ctrl_ext;
   8919 
   8920 			id = wm_get_phy_id_82575(sc);
   8921 			if (id != -1) {
   8922 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8923 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8924 			}
   8925 			if ((id == -1)
   8926 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8927 				/* Power on the SGMII PHY if it is disabled */
   8928 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8929 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8930 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8931 				CSR_WRITE_FLUSH(sc);
   8932 				delay(300*1000); /* XXX too long */
   8933 
8934 				/* Try PHY addresses 1 through 7 */
   8935 				for (i = 1; i < 8; i++)
   8936 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8937 					    0xffffffff, i, MII_OFFSET_ANY,
   8938 					    MIIF_DOPAUSE);
   8939 
8940 				/* Restore the previous SFP cage power state */
   8941 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8942 			}
   8943 		}
   8944 	} else {
   8945 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8946 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8947 	}
   8948 
   8949 	/*
8950 	 * If the MAC is PCH2 or PCH_LPT and fails to detect an MII PHY, call
   8951 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8952 	 */
   8953 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8954 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8955 		wm_set_mdio_slow_mode_hv(sc);
   8956 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8957 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8958 	}
   8959 
   8960 	/*
   8961 	 * (For ICH8 variants)
   8962 	 * If PHY detection failed, use BM's r/w function and retry.
   8963 	 */
   8964 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8965 		/* if failed, retry with *_bm_* */
   8966 		aprint_verbose_dev(dev, "Assumed PHY access function "
   8967 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   8968 		    sc->sc_phytype);
   8969 		sc->sc_phytype = WMPHY_BM;
   8970 		mii->mii_readreg = wm_gmii_bm_readreg;
   8971 		mii->mii_writereg = wm_gmii_bm_writereg;
   8972 
   8973 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8974 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8975 	}
   8976 
   8977 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8978 		/* No PHY was found */
   8979 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8980 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8981 		sc->sc_phytype = WMPHY_NONE;
   8982 	} else {
   8983 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   8984 
   8985 		/*
8986 		 * PHY found! Check the PHY type again with the second call
8987 		 * of wm_gmii_setup_phytype.
   8988 		 */
   8989 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   8990 		    child->mii_mpd_model);
   8991 
   8992 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8993 	}
   8994 }
   8995 
   8996 /*
   8997  * wm_gmii_mediachange:	[ifmedia interface function]
   8998  *
   8999  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9000  */
   9001 static int
   9002 wm_gmii_mediachange(struct ifnet *ifp)
   9003 {
   9004 	struct wm_softc *sc = ifp->if_softc;
   9005 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9006 	int rc;
   9007 
   9008 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9009 		device_xname(sc->sc_dev), __func__));
   9010 	if ((ifp->if_flags & IFF_UP) == 0)
   9011 		return 0;
   9012 
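	/*
	 * For autoselected media, or on devices newer than the 82543,
	 * let the MAC follow the PHY instead of forcing speed/duplex;
	 * otherwise force the MAC to match the selected media.
	 */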
   9013 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9014 	sc->sc_ctrl |= CTRL_SLU;
   9015 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9016 	    || (sc->sc_type > WM_T_82543)) {
   9017 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9018 	} else {
   9019 		sc->sc_ctrl &= ~CTRL_ASDE;
   9020 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9021 		if (ife->ifm_media & IFM_FDX)
   9022 			sc->sc_ctrl |= CTRL_FD;
   9023 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9024 		case IFM_10_T:
   9025 			sc->sc_ctrl |= CTRL_SPEED_10;
   9026 			break;
   9027 		case IFM_100_TX:
   9028 			sc->sc_ctrl |= CTRL_SPEED_100;
   9029 			break;
   9030 		case IFM_1000_T:
   9031 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9032 			break;
   9033 		default:
   9034 			panic("wm_gmii_mediachange: bad media 0x%x",
   9035 			    ife->ifm_media);
   9036 		}
   9037 	}
   9038 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9039 	if (sc->sc_type <= WM_T_82543)
   9040 		wm_gmii_reset(sc);
   9041 
   9042 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9043 		return 0;
   9044 	return rc;
   9045 }
   9046 
   9047 /*
   9048  * wm_gmii_mediastatus:	[ifmedia interface function]
   9049  *
   9050  *	Get the current interface media status on a 1000BASE-T device.
   9051  */
   9052 static void
   9053 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9054 {
   9055 	struct wm_softc *sc = ifp->if_softc;
   9056 
   9057 	ether_mediastatus(ifp, ifmr);
   9058 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9059 	    | sc->sc_flowflags;
   9060 }
   9061 
   9062 #define	MDI_IO		CTRL_SWDPIN(2)
   9063 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9064 #define	MDI_CLK		CTRL_SWDPIN(3)
   9065 
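/*
 * wm_i82543_mii_sendbits:
 *
 *	Bit-bang helper for MDIO on the i82543, which has no MDIC
 *	register. The MII management frame is clocked out by hand over
 *	software-controlled pins: MDI_IO carries the data, MDI_CLK the
 *	clock, and MDI_DIR the host drive direction. This shifts out
 *	the low-order 'nbits' bits of 'data', most significant bit
 *	first; wm_i82543_mii_recvbits() below turns the bus around and
 *	clocks a 16-bit result back in.
 */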
   9066 static void
   9067 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9068 {
   9069 	uint32_t i, v;
   9070 
   9071 	v = CSR_READ(sc, WMREG_CTRL);
   9072 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9073 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9074 
   9075 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9076 		if (data & i)
   9077 			v |= MDI_IO;
   9078 		else
   9079 			v &= ~MDI_IO;
   9080 		CSR_WRITE(sc, WMREG_CTRL, v);
   9081 		CSR_WRITE_FLUSH(sc);
   9082 		delay(10);
   9083 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9084 		CSR_WRITE_FLUSH(sc);
   9085 		delay(10);
   9086 		CSR_WRITE(sc, WMREG_CTRL, v);
   9087 		CSR_WRITE_FLUSH(sc);
   9088 		delay(10);
   9089 	}
   9090 }
   9091 
   9092 static uint32_t
   9093 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9094 {
   9095 	uint32_t v, i, data = 0;
   9096 
   9097 	v = CSR_READ(sc, WMREG_CTRL);
   9098 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9099 	v |= CTRL_SWDPIO(3);
   9100 
   9101 	CSR_WRITE(sc, WMREG_CTRL, v);
   9102 	CSR_WRITE_FLUSH(sc);
   9103 	delay(10);
   9104 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9105 	CSR_WRITE_FLUSH(sc);
   9106 	delay(10);
   9107 	CSR_WRITE(sc, WMREG_CTRL, v);
   9108 	CSR_WRITE_FLUSH(sc);
   9109 	delay(10);
   9110 
   9111 	for (i = 0; i < 16; i++) {
   9112 		data <<= 1;
   9113 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9114 		CSR_WRITE_FLUSH(sc);
   9115 		delay(10);
   9116 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9117 			data |= 1;
   9118 		CSR_WRITE(sc, WMREG_CTRL, v);
   9119 		CSR_WRITE_FLUSH(sc);
   9120 		delay(10);
   9121 	}
   9122 
   9123 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9124 	CSR_WRITE_FLUSH(sc);
   9125 	delay(10);
   9126 	CSR_WRITE(sc, WMREG_CTRL, v);
   9127 	CSR_WRITE_FLUSH(sc);
   9128 	delay(10);
   9129 
   9130 	return data;
   9131 }
   9132 
   9133 #undef MDI_IO
   9134 #undef MDI_DIR
   9135 #undef MDI_CLK
   9136 
   9137 /*
   9138  * wm_gmii_i82543_readreg:	[mii interface function]
   9139  *
   9140  *	Read a PHY register on the GMII (i82543 version).
   9141  */
   9142 static int
   9143 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9144 {
   9145 	struct wm_softc *sc = device_private(self);
   9146 	int rv;
   9147 
   9148 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9149 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9150 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9151 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9152 
   9153 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9154 	    device_xname(sc->sc_dev), phy, reg, rv));
   9155 
   9156 	return rv;
   9157 }
   9158 
   9159 /*
   9160  * wm_gmii_i82543_writereg:	[mii interface function]
   9161  *
   9162  *	Write a PHY register on the GMII (i82543 version).
   9163  */
   9164 static void
   9165 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9166 {
   9167 	struct wm_softc *sc = device_private(self);
   9168 
   9169 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9170 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9171 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9172 	    (MII_COMMAND_START << 30), 32);
   9173 }
   9174 
   9175 /*
   9176  * wm_gmii_mdic_readreg:	[mii interface function]
   9177  *
   9178  *	Read a PHY register on the GMII.
   9179  */
   9180 static int
   9181 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9182 {
   9183 	struct wm_softc *sc = device_private(self);
   9184 	uint32_t mdic = 0;
   9185 	int i, rv;
   9186 
   9187 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9188 	    MDIC_REGADD(reg));
   9189 
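	/* Poll for completion; the hardware sets MDIC_READY when done. */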
   9190 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9191 		mdic = CSR_READ(sc, WMREG_MDIC);
   9192 		if (mdic & MDIC_READY)
   9193 			break;
   9194 		delay(50);
   9195 	}
   9196 
   9197 	if ((mdic & MDIC_READY) == 0) {
   9198 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9199 		    device_xname(sc->sc_dev), phy, reg);
   9200 		rv = 0;
   9201 	} else if (mdic & MDIC_E) {
   9202 #if 0 /* This is normal if no PHY is present. */
   9203 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9204 		    device_xname(sc->sc_dev), phy, reg);
   9205 #endif
   9206 		rv = 0;
   9207 	} else {
   9208 		rv = MDIC_DATA(mdic);
   9209 		if (rv == 0xffff)
   9210 			rv = 0;
   9211 	}
   9212 
   9213 	return rv;
   9214 }
   9215 
   9216 /*
   9217  * wm_gmii_mdic_writereg:	[mii interface function]
   9218  *
   9219  *	Write a PHY register on the GMII.
   9220  */
   9221 static void
   9222 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9223 {
   9224 	struct wm_softc *sc = device_private(self);
   9225 	uint32_t mdic = 0;
   9226 	int i;
   9227 
   9228 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9229 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9230 
   9231 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9232 		mdic = CSR_READ(sc, WMREG_MDIC);
   9233 		if (mdic & MDIC_READY)
   9234 			break;
   9235 		delay(50);
   9236 	}
   9237 
   9238 	if ((mdic & MDIC_READY) == 0)
   9239 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9240 		    device_xname(sc->sc_dev), phy, reg);
   9241 	else if (mdic & MDIC_E)
   9242 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9243 		    device_xname(sc->sc_dev), phy, reg);
   9244 }
   9245 
   9246 /*
   9247  * wm_gmii_i82544_readreg:	[mii interface function]
   9248  *
   9249  *	Read a PHY register on the GMII.
   9250  */
   9251 static int
   9252 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9253 {
   9254 	struct wm_softc *sc = device_private(self);
   9255 	int rv;
   9256 
   9257 	if (sc->phy.acquire(sc)) {
   9258 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9259 		    __func__);
   9260 		return 0;
   9261 	}
   9262 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9263 	sc->phy.release(sc);
   9264 
   9265 	return rv;
   9266 }
   9267 
   9268 /*
   9269  * wm_gmii_i82544_writereg:	[mii interface function]
   9270  *
   9271  *	Write a PHY register on the GMII.
   9272  */
   9273 static void
   9274 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9275 {
   9276 	struct wm_softc *sc = device_private(self);
   9277 
   9278 	if (sc->phy.acquire(sc)) {
   9279 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9280 		    __func__);
		return;
9281 	}
   9282 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9283 	sc->phy.release(sc);
   9284 }
   9285 
   9286 /*
   9287  * wm_gmii_i80003_readreg:	[mii interface function]
   9288  *
9289  *	Read a PHY register on the Kumeran interface (80003).
9290  * This could be handled by the PHY layer if we didn't have to lock the
9291  * resource ...
   9292  */
   9293 static int
   9294 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9295 {
   9296 	struct wm_softc *sc = device_private(self);
   9297 	int rv;
   9298 
9299 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   9300 		return 0;
   9301 
   9302 	if (sc->phy.acquire(sc)) {
   9303 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9304 		    __func__);
   9305 		return 0;
   9306 	}
   9307 
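	/*
	 * The GG82563 has two page select registers: offsets below
	 * GG82563_MIN_ALT_REG go through the normal page select
	 * register, higher offsets through the alternate one.
	 */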
   9308 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9309 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9310 		    reg >> GG82563_PAGE_SHIFT);
   9311 	} else {
   9312 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9313 		    reg >> GG82563_PAGE_SHIFT);
   9314 	}
9315 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   9316 	delay(200);
   9317 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9318 	delay(200);
   9319 	sc->phy.release(sc);
   9320 
   9321 	return rv;
   9322 }
   9323 
   9324 /*
   9325  * wm_gmii_i80003_writereg:	[mii interface function]
   9326  *
9327  *	Write a PHY register on the Kumeran interface (80003).
9328  * This could be handled by the PHY layer if we didn't have to lock the
9329  * resource ...
   9330  */
   9331 static void
   9332 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9333 {
   9334 	struct wm_softc *sc = device_private(self);
   9335 
9336 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   9337 		return;
   9338 
   9339 	if (sc->phy.acquire(sc)) {
   9340 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9341 		    __func__);
   9342 		return;
   9343 	}
   9344 
   9345 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9346 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9347 		    reg >> GG82563_PAGE_SHIFT);
   9348 	} else {
   9349 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9350 		    reg >> GG82563_PAGE_SHIFT);
   9351 	}
9352 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   9353 	delay(200);
   9354 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9355 	delay(200);
   9356 
   9357 	sc->phy.release(sc);
   9358 }
   9359 
   9360 /*
   9361  * wm_gmii_bm_readreg:	[mii interface function]
   9362  *
9363  *	Read a PHY register on the BM PHY.
9364  * This could be handled by the PHY layer if we didn't have to lock the
9365  * resource ...
   9366  */
   9367 static int
   9368 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9369 {
   9370 	struct wm_softc *sc = device_private(self);
   9371 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9372 	uint16_t val;
   9373 	int rv;
   9374 
   9375 	if (sc->phy.acquire(sc)) {
   9376 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9377 		    __func__);
   9378 		return 0;
   9379 	}
   9380 
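	/*
	 * On BM PHYs (other than 82574/82583), page-0 register 25,
	 * register 31 and pages 768 and above are only reachable
	 * through PHY address 1, so redirect the access there.
	 */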
   9381 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9382 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9383 		    || (reg == 31)) ? 1 : phy;
   9384 	/* Page 800 works differently than the rest so it has its own func */
   9385 	if (page == BM_WUC_PAGE) {
   9386 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9387 		rv = val;
   9388 		goto release;
   9389 	}
   9390 
   9391 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9392 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9393 		    && (sc->sc_type != WM_T_82583))
   9394 			wm_gmii_mdic_writereg(self, phy,
   9395 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9396 		else
   9397 			wm_gmii_mdic_writereg(self, phy,
   9398 			    BME1000_PHY_PAGE_SELECT, page);
   9399 	}
   9400 
   9401 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9402 
   9403 release:
   9404 	sc->phy.release(sc);
   9405 	return rv;
   9406 }
   9407 
   9408 /*
   9409  * wm_gmii_bm_writereg:	[mii interface function]
   9410  *
9411  *	Write a PHY register on the BM PHY.
9412  * This could be handled by the PHY layer if we didn't have to lock the
9413  * resource ...
   9414  */
   9415 static void
   9416 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9417 {
   9418 	struct wm_softc *sc = device_private(self);
   9419 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9420 
   9421 	if (sc->phy.acquire(sc)) {
   9422 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9423 		    __func__);
   9424 		return;
   9425 	}
   9426 
   9427 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9428 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9429 		    || (reg == 31)) ? 1 : phy;
   9430 	/* Page 800 works differently than the rest so it has its own func */
   9431 	if (page == BM_WUC_PAGE) {
   9432 		uint16_t tmp;
   9433 
   9434 		tmp = val;
   9435 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9436 		goto release;
   9437 	}
   9438 
   9439 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9440 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9441 		    && (sc->sc_type != WM_T_82583))
   9442 			wm_gmii_mdic_writereg(self, phy,
   9443 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9444 		else
   9445 			wm_gmii_mdic_writereg(self, phy,
   9446 			    BME1000_PHY_PAGE_SELECT, page);
   9447 	}
   9448 
   9449 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9450 
   9451 release:
   9452 	sc->phy.release(sc);
   9453 }
   9454 
   9455 static void
   9456 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9457 {
   9458 	struct wm_softc *sc = device_private(self);
   9459 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9460 	uint16_t wuce, reg;
   9461 
   9462 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9463 		device_xname(sc->sc_dev), __func__));
   9464 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9465 	if (sc->sc_type == WM_T_PCH) {
9466 		/* XXX The e1000 driver does nothing here... why? */
   9467 	}
   9468 
   9469 	/*
   9470 	 * 1) Enable PHY wakeup register first.
   9471 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9472 	 */
   9473 
   9474 	/* Set page 769 */
   9475 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9476 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9477 
   9478 	/* Read WUCE and save it */
   9479 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9480 
   9481 	reg = wuce | BM_WUC_ENABLE_BIT;
   9482 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9483 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9484 
   9485 	/* Select page 800 */
   9486 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9487 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9488 
   9489 	/*
   9490 	 * 2) Access PHY wakeup register.
   9491 	 * See e1000_access_phy_wakeup_reg_bm.
   9492 	 */
   9493 
   9494 	/* Write page 800 */
   9495 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9496 
   9497 	if (rd)
   9498 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9499 	else
   9500 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9501 
   9502 	/*
   9503 	 * 3) Disable PHY wakeup register.
   9504 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9505 	 */
   9506 	/* Set page 769 */
   9507 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9508 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9509 
   9510 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9511 }
   9512 
   9513 /*
   9514  * wm_gmii_hv_readreg:	[mii interface function]
   9515  *
9516  *	Read a PHY register on the HV (PCH and newer) PHY.
9517  * This could be handled by the PHY layer if we didn't have to lock the
9518  * resource ...
   9519  */
   9520 static int
   9521 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9522 {
   9523 	struct wm_softc *sc = device_private(self);
   9524 	int rv;
   9525 
   9526 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9527 		device_xname(sc->sc_dev), __func__));
   9528 	if (sc->phy.acquire(sc)) {
   9529 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9530 		    __func__);
   9531 		return 0;
   9532 	}
   9533 
   9534 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9535 	sc->phy.release(sc);
   9536 	return rv;
   9537 }
   9538 
   9539 static int
   9540 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9541 {
   9542 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9543 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9544 	uint16_t val;
   9545 	int rv;
   9546 
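	/*
	 * Pages at and above HV_INTC_FC_PAGE_START are only reachable
	 * through PHY address 1.
	 */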
   9547 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9548 
   9549 	/* Page 800 works differently than the rest so it has its own func */
   9550 	if (page == BM_WUC_PAGE) {
   9551 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9552 		return val;
   9553 	}
   9554 
   9555 	/*
9556 	 * Pages lower than 768 work differently than the rest, so they
9557 	 * would need their own function
   9558 	 */
   9559 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9560 		printf("gmii_hv_readreg!!!\n");
   9561 		return 0;
   9562 	}
   9563 
   9564 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9565 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9566 		    page << BME1000_PAGE_SHIFT);
   9567 	}
   9568 
   9569 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9570 	return rv;
   9571 }
   9572 
   9573 /*
   9574  * wm_gmii_hv_writereg:	[mii interface function]
   9575  *
9576  *	Write a PHY register on the HV (PCH and newer) PHY.
9577  * This could be handled by the PHY layer if we didn't have to lock the
9578  * resource ...
   9579  */
   9580 static void
   9581 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9582 {
   9583 	struct wm_softc *sc = device_private(self);
   9584 
   9585 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9586 		device_xname(sc->sc_dev), __func__));
   9587 
   9588 	if (sc->phy.acquire(sc)) {
   9589 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9590 		    __func__);
   9591 		return;
   9592 	}
   9593 
   9594 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9595 	sc->phy.release(sc);
   9596 }
   9597 
   9598 static void
   9599 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9600 {
   9601 	struct wm_softc *sc = device_private(self);
   9602 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9603 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9604 
   9605 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9606 
   9607 	/* Page 800 works differently than the rest so it has its own func */
   9608 	if (page == BM_WUC_PAGE) {
   9609 		uint16_t tmp;
   9610 
   9611 		tmp = val;
   9612 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9613 		return;
   9614 	}
   9615 
   9616 	/*
9617 	 * Pages lower than 768 work differently than the rest, so they
9618 	 * would need their own function
   9619 	 */
   9620 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9621 		printf("gmii_hv_writereg!!!\n");
   9622 		return;
   9623 	}
   9624 
   9625 	{
   9626 		/*
   9627 		 * XXX Workaround MDIO accesses being disabled after entering
   9628 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9629 		 * register is set)
   9630 		 */
   9631 		if (sc->sc_phytype == WMPHY_82578) {
   9632 			struct mii_softc *child;
   9633 
   9634 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9635 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9636 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9637 			    && ((val & (1 << 11)) != 0)) {
   9638 				printf("XXX need workaround\n");
   9639 			}
   9640 		}
   9641 
   9642 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9643 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9644 			    page << BME1000_PAGE_SHIFT);
   9645 		}
   9646 	}
   9647 
   9648 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9649 }
   9650 
   9651 /*
   9652  * wm_gmii_82580_readreg:	[mii interface function]
   9653  *
   9654  *	Read a PHY register on the 82580 and I350.
   9655  * This could be handled by the PHY layer if we didn't have to lock the
    9656  * resource ...
   9657  */
   9658 static int
   9659 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9660 {
   9661 	struct wm_softc *sc = device_private(self);
   9662 	int rv;
   9663 
   9664 	if (sc->phy.acquire(sc) != 0) {
   9665 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9666 		    __func__);
   9667 		return 0;
   9668 	}
   9669 
   9670 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9671 
   9672 	sc->phy.release(sc);
   9673 	return rv;
   9674 }
   9675 
   9676 /*
   9677  * wm_gmii_82580_writereg:	[mii interface function]
   9678  *
   9679  *	Write a PHY register on the 82580 and I350.
   9680  * This could be handled by the PHY layer if we didn't have to lock the
    9681  * resource ...
   9682  */
   9683 static void
   9684 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9685 {
   9686 	struct wm_softc *sc = device_private(self);
   9687 
   9688 	if (sc->phy.acquire(sc) != 0) {
   9689 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9690 		    __func__);
   9691 		return;
   9692 	}
   9693 
   9694 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9695 
   9696 	sc->phy.release(sc);
   9697 }
   9698 
   9699 /*
   9700  * wm_gmii_gs40g_readreg:	[mii interface function]
   9701  *
    9702  *	Read a PHY register on the I210 and I211.
    9703  * This could be handled by the PHY layer if we didn't have to lock the
    9704  * resource ...
   9705  */
   9706 static int
   9707 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9708 {
   9709 	struct wm_softc *sc = device_private(self);
   9710 	int page, offset;
   9711 	int rv;
   9712 
   9713 	/* Acquire semaphore */
   9714 	if (sc->phy.acquire(sc)) {
   9715 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9716 		    __func__);
   9717 		return 0;
   9718 	}
   9719 
   9720 	/* Page select */
   9721 	page = reg >> GS40G_PAGE_SHIFT;
   9722 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9723 
   9724 	/* Read reg */
   9725 	offset = reg & GS40G_OFFSET_MASK;
   9726 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9727 
   9728 	sc->phy.release(sc);
   9729 	return rv;
   9730 }
   9731 
   9732 /*
   9733  * wm_gmii_gs40g_writereg:	[mii interface function]
   9734  *
   9735  *	Write a PHY register on the I210 and I211.
   9736  * This could be handled by the PHY layer if we didn't have to lock the
    9737  * resource ...
   9738  */
   9739 static void
   9740 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9741 {
   9742 	struct wm_softc *sc = device_private(self);
   9743 	int page, offset;
   9744 
   9745 	/* Acquire semaphore */
   9746 	if (sc->phy.acquire(sc)) {
   9747 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9748 		    __func__);
   9749 		return;
   9750 	}
   9751 
   9752 	/* Page select */
   9753 	page = reg >> GS40G_PAGE_SHIFT;
   9754 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9755 
   9756 	/* Write reg */
   9757 	offset = reg & GS40G_OFFSET_MASK;
   9758 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9759 
   9760 	/* Release semaphore */
   9761 	sc->phy.release(sc);
   9762 }
   9763 
   9764 /*
   9765  * wm_gmii_statchg:	[mii interface function]
   9766  *
   9767  *	Callback from MII layer when media changes.
   9768  */
   9769 static void
   9770 wm_gmii_statchg(struct ifnet *ifp)
   9771 {
   9772 	struct wm_softc *sc = ifp->if_softc;
   9773 	struct mii_data *mii = &sc->sc_mii;
   9774 
   9775 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9776 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9777 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9778 
   9779 	/*
   9780 	 * Get flow control negotiation result.
   9781 	 */
   9782 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9783 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9784 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9785 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9786 	}
   9787 
   9788 	if (sc->sc_flowflags & IFM_FLOW) {
   9789 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9790 			sc->sc_ctrl |= CTRL_TFCE;
   9791 			sc->sc_fcrtl |= FCRTL_XONE;
   9792 		}
   9793 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9794 			sc->sc_ctrl |= CTRL_RFCE;
   9795 	}
   9796 
   9797 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9798 		DPRINTF(WM_DEBUG_LINK,
   9799 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9800 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9801 	} else {
   9802 		DPRINTF(WM_DEBUG_LINK,
   9803 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9804 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9805 	}
   9806 
   9807 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9808 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9809 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9810 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9811 	if (sc->sc_type == WM_T_80003) {
   9812 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9813 		case IFM_1000_T:
   9814 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9815 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9816 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9817 			break;
   9818 		default:
   9819 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9820 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9821 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9822 			break;
   9823 		}
   9824 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9825 	}
   9826 }
   9827 
   9828 /* kumeran related (80003, ICH* and PCH*) */
   9829 
   9830 /*
   9831  * wm_kmrn_readreg:
   9832  *
   9833  *	Read a kumeran register
   9834  */
   9835 static int
   9836 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9837 {
   9838 	int rv;
   9839 
   9840 	if (sc->sc_type == WM_T_80003)
   9841 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9842 	else
   9843 		rv = sc->phy.acquire(sc);
   9844 	if (rv != 0) {
   9845 		aprint_error_dev(sc->sc_dev,
   9846 		    "%s: failed to get semaphore\n", __func__);
   9847 		return 0;
   9848 	}
   9849 
   9850 	rv = wm_kmrn_readreg_locked(sc, reg);
   9851 
   9852 	if (sc->sc_type == WM_T_80003)
   9853 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9854 	else
   9855 		sc->phy.release(sc);
   9856 
   9857 	return rv;
   9858 }
   9859 
   9860 static int
   9861 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9862 {
   9863 	int rv;
   9864 
   9865 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9866 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9867 	    KUMCTRLSTA_REN);
   9868 	CSR_WRITE_FLUSH(sc);
   9869 	delay(2);
   9870 
   9871 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9872 
   9873 	return rv;
   9874 }
   9875 
   9876 /*
   9877  * wm_kmrn_writereg:
   9878  *
   9879  *	Write a kumeran register
   9880  */
   9881 static void
   9882 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9883 {
   9884 	int rv;
   9885 
   9886 	if (sc->sc_type == WM_T_80003)
   9887 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9888 	else
   9889 		rv = sc->phy.acquire(sc);
   9890 	if (rv != 0) {
   9891 		aprint_error_dev(sc->sc_dev,
   9892 		    "%s: failed to get semaphore\n", __func__);
   9893 		return;
   9894 	}
   9895 
   9896 	wm_kmrn_writereg_locked(sc, reg, val);
   9897 
   9898 	if (sc->sc_type == WM_T_80003)
   9899 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9900 	else
   9901 		sc->phy.release(sc);
   9902 }
   9903 
   9904 static void
   9905 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9906 {
   9907 
   9908 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9909 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9910 	    (val & KUMCTRLSTA_MASK));
   9911 }
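
         /*
          * Usage sketch: callers pass one of the KUMCTRLSTA_OFFSET_*
          * values plus a 16-bit payload; the offset is shifted into the
          * KUMCTRLSTA_OFFSET field and the data travels in the low 16
          * bits (KUMCTRLSTA_MASK).  wm_gmii_statchg() above does, e.g.:
          *
          *	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
          *	    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
          */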
   9912 
   9913 /* SGMII related */
   9914 
   9915 /*
   9916  * wm_sgmii_uses_mdio
   9917  *
   9918  * Check whether the transaction is to the internal PHY or the external
   9919  * MDIO interface. Return true if it's MDIO.
   9920  */
   9921 static bool
   9922 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9923 {
   9924 	uint32_t reg;
   9925 	bool ismdio = false;
   9926 
   9927 	switch (sc->sc_type) {
   9928 	case WM_T_82575:
   9929 	case WM_T_82576:
   9930 		reg = CSR_READ(sc, WMREG_MDIC);
   9931 		ismdio = ((reg & MDIC_DEST) != 0);
   9932 		break;
   9933 	case WM_T_82580:
   9934 	case WM_T_I350:
   9935 	case WM_T_I354:
   9936 	case WM_T_I210:
   9937 	case WM_T_I211:
   9938 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9939 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9940 		break;
   9941 	default:
   9942 		break;
   9943 	}
   9944 
   9945 	return ismdio;
   9946 }
   9947 
   9948 /*
   9949  * wm_sgmii_readreg:	[mii interface function]
   9950  *
   9951  *	Read a PHY register on the SGMII
   9952  * This could be handled by the PHY layer if we didn't have to lock the
    9953  * resource ...
   9954  */
   9955 static int
   9956 wm_sgmii_readreg(device_t self, int phy, int reg)
   9957 {
   9958 	struct wm_softc *sc = device_private(self);
   9959 	uint32_t i2ccmd;
   9960 	int i, rv;
   9961 
   9962 	if (sc->phy.acquire(sc)) {
   9963 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9964 		    __func__);
   9965 		return 0;
   9966 	}
   9967 
   9968 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9969 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9970 	    | I2CCMD_OPCODE_READ;
   9971 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9972 
   9973 	/* Poll the ready bit */
   9974 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9975 		delay(50);
   9976 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9977 		if (i2ccmd & I2CCMD_READY)
   9978 			break;
   9979 	}
   9980 	if ((i2ccmd & I2CCMD_READY) == 0)
   9981 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9982 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9983 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9984 
   9985 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9986 
   9987 	sc->phy.release(sc);
   9988 	return rv;
   9989 }
   9990 
   9991 /*
   9992  * wm_sgmii_writereg:	[mii interface function]
   9993  *
   9994  *	Write a PHY register on the SGMII.
   9995  * This could be handled by the PHY layer if we didn't have to lock the
    9996  * resource ...
   9997  */
   9998 static void
   9999 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10000 {
   10001 	struct wm_softc *sc = device_private(self);
   10002 	uint32_t i2ccmd;
   10003 	int i;
   10004 	int val_swapped;
   10005 
   10006 	if (sc->phy.acquire(sc) != 0) {
   10007 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10008 		    __func__);
   10009 		return;
   10010 	}
   10011 	/* Swap the data bytes for the I2C interface */
   10012 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10013 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10014 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10015 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10016 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10017 
   10018 	/* Poll the ready bit */
   10019 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10020 		delay(50);
   10021 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10022 		if (i2ccmd & I2CCMD_READY)
   10023 			break;
   10024 	}
   10025 	if ((i2ccmd & I2CCMD_READY) == 0)
   10026 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10027 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10028 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10029 
   10030 	sc->phy.release(sc);
   10031 }
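
          /*
           * Byte-order sketch: the I2CCMD data field carries the two data
           * bytes in the opposite order from the PHY register value, so
           * both accessors above swap them.  A register value of 0x1234
           * is written into I2CCMD as 0x3412, and a read of 0x3412 yields
           * 0x1234:
           *
           *	swapped = ((val >> 8) & 0x00ff) | ((val << 8) & 0xff00);
           */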
   10032 
   10033 /* TBI related */
   10034 
   10035 /*
   10036  * wm_tbi_mediainit:
   10037  *
   10038  *	Initialize media for use on 1000BASE-X devices.
   10039  */
   10040 static void
   10041 wm_tbi_mediainit(struct wm_softc *sc)
   10042 {
   10043 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10044 	const char *sep = "";
   10045 
   10046 	if (sc->sc_type < WM_T_82543)
   10047 		sc->sc_tipg = TIPG_WM_DFLT;
   10048 	else
   10049 		sc->sc_tipg = TIPG_LG_DFLT;
   10050 
   10051 	sc->sc_tbi_serdes_anegticks = 5;
   10052 
   10053 	/* Initialize our media structures */
   10054 	sc->sc_mii.mii_ifp = ifp;
   10055 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10056 
   10057 	if ((sc->sc_type >= WM_T_82575)
   10058 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10059 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10060 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10061 	else
   10062 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10063 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10064 
   10065 	/*
   10066 	 * SWD Pins:
   10067 	 *
   10068 	 *	0 = Link LED (output)
   10069 	 *	1 = Loss Of Signal (input)
   10070 	 */
   10071 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10072 
   10073 	/* XXX Perhaps this is only for TBI */
   10074 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10075 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10076 
   10077 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10078 		sc->sc_ctrl &= ~CTRL_LRST;
   10079 
   10080 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10081 
   10082 #define	ADD(ss, mm, dd)							\
   10083 do {									\
   10084 	aprint_normal("%s%s", sep, ss);					\
   10085 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10086 	sep = ", ";							\
   10087 } while (/*CONSTCOND*/0)
   10088 
   10089 	aprint_normal_dev(sc->sc_dev, "");
   10090 
   10091 	if (sc->sc_type == WM_T_I354) {
   10092 		uint32_t status;
   10093 
   10094 		status = CSR_READ(sc, WMREG_STATUS);
   10095 		if (((status & STATUS_2P5_SKU) != 0)
   10096 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10097 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
   10098 		} else
   10099 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
   10100 	} else if (sc->sc_type == WM_T_82545) {
   10101 		/* Only 82545 is LX (XXX except SFP) */
   10102 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10103 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10104 	} else {
   10105 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10106 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10107 	}
   10108 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10109 	aprint_normal("\n");
   10110 
   10111 #undef ADD
   10112 
   10113 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10114 }
   10115 
   10116 /*
   10117  * wm_tbi_mediachange:	[ifmedia interface function]
   10118  *
   10119  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10120  */
   10121 static int
   10122 wm_tbi_mediachange(struct ifnet *ifp)
   10123 {
   10124 	struct wm_softc *sc = ifp->if_softc;
   10125 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10126 	uint32_t status;
   10127 	int i;
   10128 
   10129 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10130 		/* XXX need some work for >= 82571 and < 82575 */
   10131 		if (sc->sc_type < WM_T_82575)
   10132 			return 0;
   10133 	}
   10134 
   10135 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10136 	    || (sc->sc_type >= WM_T_82575))
   10137 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10138 
   10139 	sc->sc_ctrl &= ~CTRL_LRST;
   10140 	sc->sc_txcw = TXCW_ANE;
   10141 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10142 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10143 	else if (ife->ifm_media & IFM_FDX)
   10144 		sc->sc_txcw |= TXCW_FD;
   10145 	else
   10146 		sc->sc_txcw |= TXCW_HD;
   10147 
   10148 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10149 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10150 
   10151 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10152 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10153 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10154 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10155 	CSR_WRITE_FLUSH(sc);
   10156 	delay(1000);
   10157 
   10158 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10159 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10160 
   10161 	/*
   10162 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   10163 	 * optics detect a signal, 0 if they don't.
   10164 	 */
   10165 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10166 		/* Have signal; wait for the link to come up. */
   10167 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10168 			delay(10000);
   10169 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10170 				break;
   10171 		}
   10172 
   10173 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10174 			    device_xname(sc->sc_dev),i));
   10175 
   10176 		status = CSR_READ(sc, WMREG_STATUS);
   10177 		DPRINTF(WM_DEBUG_LINK,
   10178 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10179 			device_xname(sc->sc_dev),status, STATUS_LU));
   10180 		if (status & STATUS_LU) {
   10181 			/* Link is up. */
   10182 			DPRINTF(WM_DEBUG_LINK,
   10183 			    ("%s: LINK: set media -> link up %s\n",
   10184 			    device_xname(sc->sc_dev),
   10185 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10186 
   10187 			/*
    10188 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    10189 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   10190 			 */
   10191 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10192 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10193 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10194 			if (status & STATUS_FD)
   10195 				sc->sc_tctl |=
   10196 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10197 			else
   10198 				sc->sc_tctl |=
   10199 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10200 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10201 				sc->sc_fcrtl |= FCRTL_XONE;
   10202 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10203 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10204 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10205 				      sc->sc_fcrtl);
   10206 			sc->sc_tbi_linkup = 1;
   10207 		} else {
   10208 			if (i == WM_LINKUP_TIMEOUT)
   10209 				wm_check_for_link(sc);
   10210 			/* Link is down. */
   10211 			DPRINTF(WM_DEBUG_LINK,
   10212 			    ("%s: LINK: set media -> link down\n",
   10213 			    device_xname(sc->sc_dev)));
   10214 			sc->sc_tbi_linkup = 0;
   10215 		}
   10216 	} else {
   10217 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10218 		    device_xname(sc->sc_dev)));
   10219 		sc->sc_tbi_linkup = 0;
   10220 	}
   10221 
   10222 	wm_tbi_serdes_set_linkled(sc);
   10223 
   10224 	return 0;
   10225 }
   10226 
   10227 /*
   10228  * wm_tbi_mediastatus:	[ifmedia interface function]
   10229  *
   10230  *	Get the current interface media status on a 1000BASE-X device.
   10231  */
   10232 static void
   10233 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10234 {
   10235 	struct wm_softc *sc = ifp->if_softc;
   10236 	uint32_t ctrl, status;
   10237 
   10238 	ifmr->ifm_status = IFM_AVALID;
   10239 	ifmr->ifm_active = IFM_ETHER;
   10240 
   10241 	status = CSR_READ(sc, WMREG_STATUS);
   10242 	if ((status & STATUS_LU) == 0) {
   10243 		ifmr->ifm_active |= IFM_NONE;
   10244 		return;
   10245 	}
   10246 
   10247 	ifmr->ifm_status |= IFM_ACTIVE;
   10248 	/* Only 82545 is LX */
   10249 	if (sc->sc_type == WM_T_82545)
   10250 		ifmr->ifm_active |= IFM_1000_LX;
   10251 	else
   10252 		ifmr->ifm_active |= IFM_1000_SX;
   10253 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10254 		ifmr->ifm_active |= IFM_FDX;
   10255 	else
   10256 		ifmr->ifm_active |= IFM_HDX;
   10257 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10258 	if (ctrl & CTRL_RFCE)
   10259 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10260 	if (ctrl & CTRL_TFCE)
   10261 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10262 }
   10263 
   10264 /* XXX TBI only */
   10265 static int
   10266 wm_check_for_link(struct wm_softc *sc)
   10267 {
   10268 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10269 	uint32_t rxcw;
   10270 	uint32_t ctrl;
   10271 	uint32_t status;
   10272 	uint32_t sig;
   10273 
   10274 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10275 		/* XXX need some work for >= 82571 */
   10276 		if (sc->sc_type >= WM_T_82571) {
   10277 			sc->sc_tbi_linkup = 1;
   10278 			return 0;
   10279 		}
   10280 	}
   10281 
   10282 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10283 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10284 	status = CSR_READ(sc, WMREG_STATUS);
   10285 
   10286 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10287 
   10288 	DPRINTF(WM_DEBUG_LINK,
   10289 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10290 		device_xname(sc->sc_dev), __func__,
   10291 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10292 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10293 
   10294 	/*
   10295 	 * SWDPIN   LU RXCW
   10296 	 *      0    0    0
   10297 	 *      0    0    1	(should not happen)
   10298 	 *      0    1    0	(should not happen)
   10299 	 *      0    1    1	(should not happen)
    10300 	 *      1    0    0	Disable autonegotiation and force link up
    10301 	 *      1    0    1	Got /C/ ordered sets but no link yet
    10302 	 *      1    1    0	(link up)
    10303 	 *      1    1    1	If IFM_AUTO, restart autonegotiation
   10304 	 *
   10305 	 */
   10306 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10307 	    && ((status & STATUS_LU) == 0)
   10308 	    && ((rxcw & RXCW_C) == 0)) {
   10309 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10310 			__func__));
   10311 		sc->sc_tbi_linkup = 0;
   10312 		/* Disable auto-negotiation in the TXCW register */
   10313 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10314 
   10315 		/*
   10316 		 * Force link-up and also force full-duplex.
   10317 		 *
    10318 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
    10319 		 * automatically, so keep the value just read in sc->sc_ctrl.
   10320 		 */
   10321 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10322 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10323 	} else if (((status & STATUS_LU) != 0)
   10324 	    && ((rxcw & RXCW_C) != 0)
   10325 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10326 		sc->sc_tbi_linkup = 1;
   10327 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10328 			__func__));
   10329 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10330 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10331 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10332 	    && ((rxcw & RXCW_C) != 0)) {
   10333 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10334 	} else {
   10335 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10336 			status));
   10337 	}
   10338 
   10339 	return 0;
   10340 }
   10341 
   10342 /*
   10343  * wm_tbi_tick:
   10344  *
   10345  *	Check the link on TBI devices.
   10346  *	This function acts as mii_tick().
   10347  */
   10348 static void
   10349 wm_tbi_tick(struct wm_softc *sc)
   10350 {
   10351 	struct mii_data *mii = &sc->sc_mii;
   10352 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10353 	uint32_t status;
   10354 
   10355 	KASSERT(WM_CORE_LOCKED(sc));
   10356 
   10357 	status = CSR_READ(sc, WMREG_STATUS);
   10358 
   10359 	/* XXX is this needed? */
   10360 	(void)CSR_READ(sc, WMREG_RXCW);
   10361 	(void)CSR_READ(sc, WMREG_CTRL);
   10362 
   10363 	/* set link status */
   10364 	if ((status & STATUS_LU) == 0) {
   10365 		DPRINTF(WM_DEBUG_LINK,
   10366 		    ("%s: LINK: checklink -> down\n",
   10367 			device_xname(sc->sc_dev)));
   10368 		sc->sc_tbi_linkup = 0;
   10369 	} else if (sc->sc_tbi_linkup == 0) {
   10370 		DPRINTF(WM_DEBUG_LINK,
   10371 		    ("%s: LINK: checklink -> up %s\n",
   10372 			device_xname(sc->sc_dev),
   10373 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10374 		sc->sc_tbi_linkup = 1;
   10375 		sc->sc_tbi_serdes_ticks = 0;
   10376 	}
   10377 
   10378 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10379 		goto setled;
   10380 
   10381 	if ((status & STATUS_LU) == 0) {
   10382 		sc->sc_tbi_linkup = 0;
   10383 		/* If the timer expired, retry autonegotiation */
   10384 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10385 		    && (++sc->sc_tbi_serdes_ticks
   10386 			>= sc->sc_tbi_serdes_anegticks)) {
   10387 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10388 			sc->sc_tbi_serdes_ticks = 0;
   10389 			/*
   10390 			 * Reset the link, and let autonegotiation do
   10391 			 * its thing
   10392 			 */
   10393 			sc->sc_ctrl |= CTRL_LRST;
   10394 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10395 			CSR_WRITE_FLUSH(sc);
   10396 			delay(1000);
   10397 			sc->sc_ctrl &= ~CTRL_LRST;
   10398 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10399 			CSR_WRITE_FLUSH(sc);
   10400 			delay(1000);
   10401 			CSR_WRITE(sc, WMREG_TXCW,
   10402 			    sc->sc_txcw & ~TXCW_ANE);
   10403 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10404 		}
   10405 	}
   10406 
   10407 setled:
   10408 	wm_tbi_serdes_set_linkled(sc);
   10409 }
   10410 
   10411 /* SERDES related */
   10412 static void
   10413 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10414 {
   10415 	uint32_t reg;
   10416 
   10417 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10418 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10419 		return;
   10420 
   10421 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10422 	reg |= PCS_CFG_PCS_EN;
   10423 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10424 
   10425 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10426 	reg &= ~CTRL_EXT_SWDPIN(3);
   10427 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10428 	CSR_WRITE_FLUSH(sc);
   10429 }
   10430 
   10431 static int
   10432 wm_serdes_mediachange(struct ifnet *ifp)
   10433 {
   10434 	struct wm_softc *sc = ifp->if_softc;
   10435 	bool pcs_autoneg = true; /* XXX */
   10436 	uint32_t ctrl_ext, pcs_lctl, reg;
   10437 
   10438 	/* XXX Currently, this function is not called on 8257[12] */
   10439 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10440 	    || (sc->sc_type >= WM_T_82575))
   10441 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10442 
   10443 	wm_serdes_power_up_link_82575(sc);
   10444 
   10445 	sc->sc_ctrl |= CTRL_SLU;
   10446 
   10447 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10448 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10449 
   10450 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10451 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10452 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10453 	case CTRL_EXT_LINK_MODE_SGMII:
   10454 		pcs_autoneg = true;
   10455 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10456 		break;
   10457 	case CTRL_EXT_LINK_MODE_1000KX:
   10458 		pcs_autoneg = false;
   10459 		/* FALLTHROUGH */
   10460 	default:
   10461 		if ((sc->sc_type == WM_T_82575)
   10462 		    || (sc->sc_type == WM_T_82576)) {
   10463 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10464 				pcs_autoneg = false;
   10465 		}
   10466 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10467 		    | CTRL_FRCFDX;
   10468 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10469 	}
   10470 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10471 
   10472 	if (pcs_autoneg) {
   10473 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10474 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10475 
   10476 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10477 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10478 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10479 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10480 	} else
   10481 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10482 
   10483 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10484 
   10485 
   10486 	return 0;
   10487 }
   10488 
   10489 static void
   10490 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10491 {
   10492 	struct wm_softc *sc = ifp->if_softc;
   10493 	struct mii_data *mii = &sc->sc_mii;
   10494 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10495 	uint32_t pcs_adv, pcs_lpab, reg;
   10496 
   10497 	ifmr->ifm_status = IFM_AVALID;
   10498 	ifmr->ifm_active = IFM_ETHER;
   10499 
   10500 	/* Check PCS */
   10501 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10502 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10503 		ifmr->ifm_active |= IFM_NONE;
   10504 		sc->sc_tbi_linkup = 0;
   10505 		goto setled;
   10506 	}
   10507 
   10508 	sc->sc_tbi_linkup = 1;
   10509 	ifmr->ifm_status |= IFM_ACTIVE;
   10510 	if (sc->sc_type == WM_T_I354) {
   10511 		uint32_t status;
   10512 
   10513 		status = CSR_READ(sc, WMREG_STATUS);
   10514 		if (((status & STATUS_2P5_SKU) != 0)
   10515 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10516 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10517 		} else
   10518 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10519 	} else {
   10520 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10521 		case PCS_LSTS_SPEED_10:
   10522 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10523 			break;
   10524 		case PCS_LSTS_SPEED_100:
   10525 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10526 			break;
   10527 		case PCS_LSTS_SPEED_1000:
   10528 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10529 			break;
   10530 		default:
   10531 			device_printf(sc->sc_dev, "Unknown speed\n");
   10532 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10533 			break;
   10534 		}
   10535 	}
   10536 	if ((reg & PCS_LSTS_FDX) != 0)
   10537 		ifmr->ifm_active |= IFM_FDX;
   10538 	else
   10539 		ifmr->ifm_active |= IFM_HDX;
   10540 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10541 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10542 		/* Check flow */
   10543 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10544 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10545 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10546 			goto setled;
   10547 		}
   10548 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10549 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10550 		DPRINTF(WM_DEBUG_LINK,
   10551 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10552 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10553 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10554 			mii->mii_media_active |= IFM_FLOW
   10555 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10556 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10557 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10558 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10559 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10560 			mii->mii_media_active |= IFM_FLOW
   10561 			    | IFM_ETH_TXPAUSE;
   10562 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10563 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10564 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10565 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10566 			mii->mii_media_active |= IFM_FLOW
   10567 			    | IFM_ETH_RXPAUSE;
   10568 		}
   10569 	}
   10570 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10571 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10572 setled:
   10573 	wm_tbi_serdes_set_linkled(sc);
   10574 }
   10575 
   10576 /*
   10577  * wm_serdes_tick:
   10578  *
   10579  *	Check the link on serdes devices.
   10580  */
   10581 static void
   10582 wm_serdes_tick(struct wm_softc *sc)
   10583 {
   10584 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10585 	struct mii_data *mii = &sc->sc_mii;
   10586 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10587 	uint32_t reg;
   10588 
   10589 	KASSERT(WM_CORE_LOCKED(sc));
   10590 
   10591 	mii->mii_media_status = IFM_AVALID;
   10592 	mii->mii_media_active = IFM_ETHER;
   10593 
   10594 	/* Check PCS */
   10595 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10596 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10597 		mii->mii_media_status |= IFM_ACTIVE;
   10598 		sc->sc_tbi_linkup = 1;
   10599 		sc->sc_tbi_serdes_ticks = 0;
   10600 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10601 		if ((reg & PCS_LSTS_FDX) != 0)
   10602 			mii->mii_media_active |= IFM_FDX;
   10603 		else
   10604 			mii->mii_media_active |= IFM_HDX;
   10605 	} else {
    10606 		mii->mii_media_active |= IFM_NONE;
   10607 		sc->sc_tbi_linkup = 0;
   10608 		/* If the timer expired, retry autonegotiation */
   10609 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10610 		    && (++sc->sc_tbi_serdes_ticks
   10611 			>= sc->sc_tbi_serdes_anegticks)) {
   10612 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10613 			sc->sc_tbi_serdes_ticks = 0;
   10614 			/* XXX */
   10615 			wm_serdes_mediachange(ifp);
   10616 		}
   10617 	}
   10618 
   10619 	wm_tbi_serdes_set_linkled(sc);
   10620 }
   10621 
   10622 /* SFP related */
   10623 
   10624 static int
   10625 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10626 {
   10627 	uint32_t i2ccmd;
   10628 	int i;
   10629 
   10630 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10631 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10632 
   10633 	/* Poll the ready bit */
   10634 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10635 		delay(50);
   10636 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10637 		if (i2ccmd & I2CCMD_READY)
   10638 			break;
   10639 	}
   10640 	if ((i2ccmd & I2CCMD_READY) == 0)
   10641 		return -1;
   10642 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10643 		return -1;
   10644 
   10645 	*data = i2ccmd & 0x00ff;
   10646 
   10647 	return 0;
   10648 }
   10649 
   10650 static uint32_t
   10651 wm_sfp_get_media_type(struct wm_softc *sc)
   10652 {
   10653 	uint32_t ctrl_ext;
   10654 	uint8_t val = 0;
   10655 	int timeout = 3;
   10656 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10657 	int rv = -1;
   10658 
   10659 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10660 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10661 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10662 	CSR_WRITE_FLUSH(sc);
   10663 
   10664 	/* Read SFP module data */
   10665 	while (timeout) {
   10666 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10667 		if (rv == 0)
   10668 			break;
   10669 		delay(100*1000); /* XXX too big */
   10670 		timeout--;
   10671 	}
   10672 	if (rv != 0)
   10673 		goto out;
   10674 	switch (val) {
   10675 	case SFF_SFP_ID_SFF:
   10676 		aprint_normal_dev(sc->sc_dev,
   10677 		    "Module/Connector soldered to board\n");
   10678 		break;
   10679 	case SFF_SFP_ID_SFP:
   10680 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10681 		break;
   10682 	case SFF_SFP_ID_UNKNOWN:
   10683 		goto out;
   10684 	default:
   10685 		break;
   10686 	}
   10687 
   10688 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10689 	if (rv != 0) {
   10690 		goto out;
   10691 	}
   10692 
   10693 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10694 		mediatype = WM_MEDIATYPE_SERDES;
    10695 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
    10696 		sc->sc_flags |= WM_F_SGMII;
    10697 		mediatype = WM_MEDIATYPE_COPPER;
    10698 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   10699 		sc->sc_flags |= WM_F_SGMII;
   10700 		mediatype = WM_MEDIATYPE_SERDES;
   10701 	}
   10702 
   10703 out:
   10704 	/* Restore I2C interface setting */
   10705 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10706 
   10707 	return mediatype;
   10708 }
   10709 
   10710 /*
   10711  * NVM related.
   10712  * Microwire, SPI (w/wo EERD) and Flash.
   10713  */
   10714 
   10715 /* Both spi and uwire */
   10716 
   10717 /*
   10718  * wm_eeprom_sendbits:
   10719  *
   10720  *	Send a series of bits to the EEPROM.
   10721  */
   10722 static void
   10723 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10724 {
   10725 	uint32_t reg;
   10726 	int x;
   10727 
   10728 	reg = CSR_READ(sc, WMREG_EECD);
   10729 
   10730 	for (x = nbits; x > 0; x--) {
   10731 		if (bits & (1U << (x - 1)))
   10732 			reg |= EECD_DI;
   10733 		else
   10734 			reg &= ~EECD_DI;
   10735 		CSR_WRITE(sc, WMREG_EECD, reg);
   10736 		CSR_WRITE_FLUSH(sc);
   10737 		delay(2);
   10738 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10739 		CSR_WRITE_FLUSH(sc);
   10740 		delay(2);
   10741 		CSR_WRITE(sc, WMREG_EECD, reg);
   10742 		CSR_WRITE_FLUSH(sc);
   10743 		delay(2);
   10744 	}
   10745 }
   10746 
   10747 /*
   10748  * wm_eeprom_recvbits:
   10749  *
   10750  *	Receive a series of bits from the EEPROM.
   10751  */
   10752 static void
   10753 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10754 {
   10755 	uint32_t reg, val;
   10756 	int x;
   10757 
   10758 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10759 
   10760 	val = 0;
   10761 	for (x = nbits; x > 0; x--) {
   10762 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10763 		CSR_WRITE_FLUSH(sc);
   10764 		delay(2);
   10765 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10766 			val |= (1U << (x - 1));
   10767 		CSR_WRITE(sc, WMREG_EECD, reg);
   10768 		CSR_WRITE_FLUSH(sc);
   10769 		delay(2);
   10770 	}
   10771 	*valp = val;
   10772 }
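
          /*
           * Timing sketch for the two bit-bang primitives above: each bit
           * is clocked by raising and then lowering EECD_SK with ~2us of
           * delay around each edge; EECD_DI is driven before the rising
           * edge when sending, and EECD_DO is sampled after it when
           * receiving.  The Microwire and SPI readers below are built
           * entirely from these two primitives.
           */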
   10773 
   10774 /* Microwire */
   10775 
   10776 /*
   10777  * wm_nvm_read_uwire:
   10778  *
   10779  *	Read a word from the EEPROM using the MicroWire protocol.
   10780  */
   10781 static int
   10782 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10783 {
   10784 	uint32_t reg, val;
   10785 	int i;
   10786 
   10787 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10788 		device_xname(sc->sc_dev), __func__));
   10789 
   10790 	for (i = 0; i < wordcnt; i++) {
   10791 		/* Clear SK and DI. */
   10792 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10793 		CSR_WRITE(sc, WMREG_EECD, reg);
   10794 
   10795 		/*
   10796 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10797 		 * and Xen.
   10798 		 *
   10799 		 * We use this workaround only for 82540 because qemu's
   10800 		 * e1000 act as 82540.
   10801 		 */
   10802 		if (sc->sc_type == WM_T_82540) {
   10803 			reg |= EECD_SK;
   10804 			CSR_WRITE(sc, WMREG_EECD, reg);
   10805 			reg &= ~EECD_SK;
   10806 			CSR_WRITE(sc, WMREG_EECD, reg);
   10807 			CSR_WRITE_FLUSH(sc);
   10808 			delay(2);
   10809 		}
   10810 		/* XXX: end of workaround */
   10811 
   10812 		/* Set CHIP SELECT. */
   10813 		reg |= EECD_CS;
   10814 		CSR_WRITE(sc, WMREG_EECD, reg);
   10815 		CSR_WRITE_FLUSH(sc);
   10816 		delay(2);
   10817 
   10818 		/* Shift in the READ command. */
   10819 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10820 
   10821 		/* Shift in address. */
   10822 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10823 
   10824 		/* Shift out the data. */
   10825 		wm_eeprom_recvbits(sc, &val, 16);
   10826 		data[i] = val & 0xffff;
   10827 
   10828 		/* Clear CHIP SELECT. */
   10829 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10830 		CSR_WRITE(sc, WMREG_EECD, reg);
   10831 		CSR_WRITE_FLUSH(sc);
   10832 		delay(2);
   10833 	}
   10834 
   10835 	return 0;
   10836 }
   10837 
   10838 /* SPI */
   10839 
   10840 /*
   10841  * Set SPI and FLASH related information from the EECD register.
   10842  * For 82541 and 82547, the word size is taken from EEPROM.
   10843  */
   10844 static int
   10845 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10846 {
   10847 	int size;
   10848 	uint32_t reg;
   10849 	uint16_t data;
   10850 
   10851 	reg = CSR_READ(sc, WMREG_EECD);
   10852 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10853 
   10854 	/* Read the size of NVM from EECD by default */
   10855 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10856 	switch (sc->sc_type) {
   10857 	case WM_T_82541:
   10858 	case WM_T_82541_2:
   10859 	case WM_T_82547:
   10860 	case WM_T_82547_2:
   10861 		/* Set dummy value to access EEPROM */
   10862 		sc->sc_nvm_wordsize = 64;
   10863 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10864 		reg = data;
   10865 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10866 		if (size == 0)
   10867 			size = 6; /* 64 word size */
   10868 		else
   10869 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10870 		break;
   10871 	case WM_T_80003:
   10872 	case WM_T_82571:
   10873 	case WM_T_82572:
   10874 	case WM_T_82573: /* SPI case */
   10875 	case WM_T_82574: /* SPI case */
   10876 	case WM_T_82583: /* SPI case */
   10877 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10878 		if (size > 14)
   10879 			size = 14;
   10880 		break;
   10881 	case WM_T_82575:
   10882 	case WM_T_82576:
   10883 	case WM_T_82580:
   10884 	case WM_T_I350:
   10885 	case WM_T_I354:
   10886 	case WM_T_I210:
   10887 	case WM_T_I211:
   10888 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10889 		if (size > 15)
   10890 			size = 15;
   10891 		break;
   10892 	default:
   10893 		aprint_error_dev(sc->sc_dev,
   10894 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10895 		return -1;
   10896 		break;
   10897 	}
   10898 
   10899 	sc->sc_nvm_wordsize = 1 << size;
   10900 
   10901 	return 0;
   10902 }
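
          /*
           * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in
           * the Intel shared code): an EECD size field of 2 on an 82575
           * gives size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256
           * words.  The clamps above then cap the result at 2^14 or 2^15
           * words depending on the device family.
           */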
   10903 
   10904 /*
   10905  * wm_nvm_ready_spi:
   10906  *
   10907  *	Wait for a SPI EEPROM to be ready for commands.
   10908  */
   10909 static int
   10910 wm_nvm_ready_spi(struct wm_softc *sc)
   10911 {
   10912 	uint32_t val;
   10913 	int usec;
   10914 
   10915 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10916 		device_xname(sc->sc_dev), __func__));
   10917 
   10918 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10919 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10920 		wm_eeprom_recvbits(sc, &val, 8);
   10921 		if ((val & SPI_SR_RDY) == 0)
   10922 			break;
   10923 	}
   10924 	if (usec >= SPI_MAX_RETRIES) {
   10925 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   10926 		return 1;
   10927 	}
   10928 	return 0;
   10929 }
   10930 
   10931 /*
   10932  * wm_nvm_read_spi:
   10933  *
    10934  *	Read a word from the EEPROM using the SPI protocol.
   10935  */
   10936 static int
   10937 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10938 {
   10939 	uint32_t reg, val;
   10940 	int i;
   10941 	uint8_t opc;
   10942 
   10943 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10944 		device_xname(sc->sc_dev), __func__));
   10945 
   10946 	/* Clear SK and CS. */
   10947 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10948 	CSR_WRITE(sc, WMREG_EECD, reg);
   10949 	CSR_WRITE_FLUSH(sc);
   10950 	delay(2);
   10951 
   10952 	if (wm_nvm_ready_spi(sc))
   10953 		return 1;
   10954 
   10955 	/* Toggle CS to flush commands. */
   10956 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10957 	CSR_WRITE_FLUSH(sc);
   10958 	delay(2);
   10959 	CSR_WRITE(sc, WMREG_EECD, reg);
   10960 	CSR_WRITE_FLUSH(sc);
   10961 	delay(2);
   10962 
   10963 	opc = SPI_OPC_READ;
   10964 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10965 		opc |= SPI_OPC_A8;
   10966 
   10967 	wm_eeprom_sendbits(sc, opc, 8);
   10968 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10969 
   10970 	for (i = 0; i < wordcnt; i++) {
   10971 		wm_eeprom_recvbits(sc, &val, 16);
   10972 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10973 	}
   10974 
   10975 	/* Raise CS and clear SK. */
   10976 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10977 	CSR_WRITE(sc, WMREG_EECD, reg);
   10978 	CSR_WRITE_FLUSH(sc);
   10979 	delay(2);
   10980 
   10981 	return 0;
   10982 }
   10983 
   10984 /* Using with EERD */
   10985 
   10986 static int
   10987 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10988 {
   10989 	uint32_t attempts = 100000;
   10990 	uint32_t i, reg = 0;
   10991 	int32_t done = -1;
   10992 
   10993 	for (i = 0; i < attempts; i++) {
   10994 		reg = CSR_READ(sc, rw);
   10995 
   10996 		if (reg & EERD_DONE) {
   10997 			done = 0;
   10998 			break;
   10999 		}
   11000 		delay(5);
   11001 	}
   11002 
   11003 	return done;
   11004 }
   11005 
   11006 static int
   11007 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11008     uint16_t *data)
   11009 {
   11010 	int i, eerd = 0;
   11011 	int error = 0;
   11012 
   11013 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11014 		device_xname(sc->sc_dev), __func__));
   11015 
   11016 	for (i = 0; i < wordcnt; i++) {
   11017 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11018 
   11019 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11020 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11021 		if (error != 0)
   11022 			break;
   11023 
   11024 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11025 	}
   11026 
   11027 	return error;
   11028 }
   11029 
   11030 /* Flash */
   11031 
   11032 static int
   11033 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11034 {
   11035 	uint32_t eecd;
   11036 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11037 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11038 	uint8_t sig_byte = 0;
   11039 
   11040 	switch (sc->sc_type) {
   11041 	case WM_T_PCH_SPT:
   11042 		/*
   11043 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11044 		 * sector valid bits from the NVM.
   11045 		 */
   11046 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11047 		if ((*bank == 0) || (*bank == 1)) {
   11048 			aprint_error_dev(sc->sc_dev,
   11049 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11050 				*bank);
   11051 			return -1;
   11052 		} else {
   11053 			*bank = *bank - 2;
   11054 			return 0;
   11055 		}
   11056 	case WM_T_ICH8:
   11057 	case WM_T_ICH9:
   11058 		eecd = CSR_READ(sc, WMREG_EECD);
   11059 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11060 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11061 			return 0;
   11062 		}
   11063 		/* FALLTHROUGH */
   11064 	default:
   11065 		/* Default to 0 */
   11066 		*bank = 0;
   11067 
   11068 		/* Check bank 0 */
   11069 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11070 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11071 			*bank = 0;
   11072 			return 0;
   11073 		}
   11074 
   11075 		/* Check bank 1 */
   11076 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11077 		    &sig_byte);
   11078 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11079 			*bank = 1;
   11080 			return 0;
   11081 		}
   11082 	}
   11083 
   11084 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11085 		device_xname(sc->sc_dev)));
   11086 	return -1;
   11087 }
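
          /*
           * Layout sketch: the flash region holds two NVM banks, each
           * sc_ich8_flash_bank_size words long, so bank 1 starts
           * sc_ich8_flash_bank_size * 2 bytes in (bank1_offset above).
           * The signature tested here is the high byte of word
           * ICH_NVM_SIG_WORD of each bank; wm_nvm_read_ich8() below uses
           * the bank number returned here to bias all its word offsets.
           */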
   11088 
   11089 /******************************************************************************
   11090  * This function does initial flash setup so that a new read/write/erase cycle
   11091  * can be started.
   11092  *
   11093  * sc - The pointer to the hw structure
   11094  ****************************************************************************/
   11095 static int32_t
   11096 wm_ich8_cycle_init(struct wm_softc *sc)
   11097 {
   11098 	uint16_t hsfsts;
   11099 	int32_t error = 1;
   11100 	int32_t i     = 0;
   11101 
   11102 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11103 
    11104 	/* Check the Flash Descriptor Valid bit in HW status */
   11105 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11106 		return error;
   11107 	}
   11108 
   11109 	/* Clear FCERR in Hw status by writing 1 */
   11110 	/* Clear DAEL in Hw status by writing a 1 */
   11111 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11112 
   11113 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11114 
    11115 	/*
    11116 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11117 	 * check against in order to start a new cycle, or the FDONE bit
    11118 	 * should be changed in the hardware so that it is 1 after hardware
    11119 	 * reset, which can then be used to tell whether a cycle is in
    11120 	 * progress or has been completed.  We should also have some software
    11121 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    11122 	 * so that accesses to those bits by two threads are serialized, and
    11123 	 * so that two threads don't start a cycle at the same time.
    11124 	 */
   11125 
   11126 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11127 		/*
   11128 		 * There is no cycle running at present, so we can start a
   11129 		 * cycle
   11130 		 */
   11131 
   11132 		/* Begin by setting Flash Cycle Done. */
   11133 		hsfsts |= HSFSTS_DONE;
   11134 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11135 		error = 0;
   11136 	} else {
   11137 		/*
    11138 		 * Otherwise poll for some time so the current cycle has a
   11139 		 * chance to end before giving up.
   11140 		 */
   11141 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11142 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11143 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11144 				error = 0;
   11145 				break;
   11146 			}
   11147 			delay(1);
   11148 		}
   11149 		if (error == 0) {
   11150 			/*
    11151 			 * The previous cycle completed within the timeout;
    11152 			 * now set the Flash Cycle Done bit.
   11153 			 */
   11154 			hsfsts |= HSFSTS_DONE;
   11155 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11156 		}
   11157 	}
   11158 	return error;
   11159 }
   11160 
   11161 /******************************************************************************
   11162  * This function starts a flash cycle and waits for its completion
   11163  *
   11164  * sc - The pointer to the hw structure
   11165  ****************************************************************************/
   11166 static int32_t
   11167 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11168 {
   11169 	uint16_t hsflctl;
   11170 	uint16_t hsfsts;
   11171 	int32_t error = 1;
   11172 	uint32_t i = 0;
   11173 
   11174 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11175 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11176 	hsflctl |= HSFCTL_GO;
   11177 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11178 
   11179 	/* Wait till FDONE bit is set to 1 */
   11180 	do {
   11181 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11182 		if (hsfsts & HSFSTS_DONE)
   11183 			break;
   11184 		delay(1);
   11185 		i++;
   11186 	} while (i < timeout);
    11187 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11188 		error = 0;
   11189 
   11190 	return error;
   11191 }
   11192 
   11193 /******************************************************************************
   11194  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11195  *
   11196  * sc - The pointer to the hw structure
   11197  * index - The index of the byte or word to read.
   11198  * size - Size of data to read, 1=byte 2=word, 4=dword
   11199  * data - Pointer to the word to store the value read.
   11200  *****************************************************************************/
   11201 static int32_t
   11202 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11203     uint32_t size, uint32_t *data)
   11204 {
   11205 	uint16_t hsfsts;
   11206 	uint16_t hsflctl;
   11207 	uint32_t flash_linear_address;
   11208 	uint32_t flash_data = 0;
   11209 	int32_t error = 1;
   11210 	int32_t count = 0;
   11211 
   11212 	if (size < 1  || size > 4 || data == 0x0 ||
   11213 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11214 		return error;
   11215 
   11216 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11217 	    sc->sc_ich8_flash_base;
   11218 
   11219 	do {
   11220 		delay(1);
   11221 		/* Steps */
   11222 		error = wm_ich8_cycle_init(sc);
   11223 		if (error)
   11224 			break;
   11225 
   11226 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11227 		/* The BCOUNT field holds size - 1: 0 = 1 byte ... 3 = 4 bytes */
    11228 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11229 		    & HSFCTL_BCOUNT_MASK;
   11230 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11231 		if (sc->sc_type == WM_T_PCH_SPT) {
   11232 			/*
    11233 			 * In SPT, this register is in LAN memory space, not
    11234 			 * flash.  Therefore, only 32-bit accesses are supported.
   11235 			 */
   11236 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11237 			    (uint32_t)hsflctl);
   11238 		} else
   11239 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11240 
   11241 		/*
   11242 		 * Write the last 24 bits of index into Flash Linear address
   11243 		 * field in Flash Address
   11244 		 */
   11245 		/* TODO: TBD maybe check the index against the size of flash */
   11246 
   11247 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11248 
   11249 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11250 
    11251 		/*
    11252 		 * If FCERR is set, clear it and retry the whole sequence
    11253 		 * a few more times before giving up; otherwise read the
    11254 		 * value out of Flash Data0 (the data is stored least
    11255 		 * significant byte first).
    11256 		 */
   11257 		if (error == 0) {
   11258 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11259 			if (size == 1)
   11260 				*data = (uint8_t)(flash_data & 0x000000FF);
   11261 			else if (size == 2)
   11262 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11263 			else if (size == 4)
   11264 				*data = (uint32_t)flash_data;
   11265 			break;
   11266 		} else {
   11267 			/*
   11268 			 * If we've gotten here, then things are probably
   11269 			 * completely hosed, but if the error condition is
   11270 			 * detected, it won't hurt to give it another try...
   11271 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11272 			 */
   11273 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11274 			if (hsfsts & HSFSTS_ERR) {
   11275 				/* Repeat for some time before giving up. */
   11276 				continue;
   11277 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11278 				break;
   11279 		}
   11280 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11281 
   11282 	return error;
   11283 }
   11284 
   11285 /******************************************************************************
   11286  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11287  *
   11288  * sc - pointer to wm_hw structure
   11289  * index - The index of the byte to read.
   11290  * data - Pointer to a byte to store the value read.
   11291  *****************************************************************************/
   11292 static int32_t
   11293 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11294 {
   11295 	int32_t status;
   11296 	uint32_t word = 0;
   11297 
   11298 	status = wm_read_ich8_data(sc, index, 1, &word);
   11299 	if (status == 0)
   11300 		*data = (uint8_t)word;
   11301 	else
   11302 		*data = 0;
   11303 
   11304 	return status;
   11305 }
   11306 
   11307 /******************************************************************************
   11308  * Reads a word from the NVM using the ICH8 flash access registers.
   11309  *
   11310  * sc - pointer to wm_hw structure
   11311  * index - The starting byte index of the word to read.
   11312  * data - Pointer to a word to store the value read.
   11313  *****************************************************************************/
   11314 static int32_t
   11315 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11316 {
   11317 	int32_t status;
   11318 	uint32_t word = 0;
   11319 
   11320 	status = wm_read_ich8_data(sc, index, 2, &word);
   11321 	if (status == 0)
   11322 		*data = (uint16_t)word;
   11323 	else
   11324 		*data = 0;
   11325 
   11326 	return status;
   11327 }
   11328 
   11329 /******************************************************************************
   11330  * Reads a dword from the NVM using the ICH8 flash access registers.
   11331  *
    11332  * sc - pointer to wm_softc structure
    11333  * index - The starting byte index of the dword to read.
    11334  * data - Pointer to a dword to store the value read.
   11335  *****************************************************************************/
   11336 static int32_t
   11337 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11338 {
   11339 	int32_t status;
   11340 
   11341 	status = wm_read_ich8_data(sc, index, 4, data);
   11342 	return status;
   11343 }
   11344 
   11345 /******************************************************************************
   11346  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11347  * register.
   11348  *
   11349  * sc - Struct containing variables accessed by shared code
   11350  * offset - offset of word in the EEPROM to read
   11351  * data - word read from the EEPROM
   11352  * words - number of words to read
   11353  *****************************************************************************/
   11354 static int
   11355 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11356 {
   11357 	int32_t  error = 0;
   11358 	uint32_t flash_bank = 0;
   11359 	uint32_t act_offset = 0;
   11360 	uint32_t bank_offset = 0;
   11361 	uint16_t word = 0;
   11362 	uint16_t i = 0;
   11363 
   11364 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11365 		device_xname(sc->sc_dev), __func__));
   11366 
   11367 	/*
   11368 	 * We need to know which is the valid flash bank.  In the event
   11369 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11370 	 * managing flash_bank.  So it cannot be trusted and needs
   11371 	 * to be updated with each read.
   11372 	 */
   11373 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11374 	if (error) {
   11375 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11376 			device_xname(sc->sc_dev)));
   11377 		flash_bank = 0;
   11378 	}
   11379 
    11380 	/*
    11381 	 * Adjust the offset appropriately if we're on bank 1, scaling
    11382 	 * by the word size to get a byte offset.
    11383 	 */
   11384 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11385 
   11386 	error = wm_get_swfwhw_semaphore(sc);
   11387 	if (error) {
   11388 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11389 		    __func__);
   11390 		return error;
   11391 	}
   11392 
   11393 	for (i = 0; i < words; i++) {
   11394 		/* The NVM part needs a byte offset, hence * 2 */
   11395 		act_offset = bank_offset + ((offset + i) * 2);
   11396 		error = wm_read_ich8_word(sc, act_offset, &word);
   11397 		if (error) {
   11398 			aprint_error_dev(sc->sc_dev,
   11399 			    "%s: failed to read NVM\n", __func__);
   11400 			break;
   11401 		}
   11402 		data[i] = word;
   11403 	}
   11404 
   11405 	wm_put_swfwhw_semaphore(sc);
   11406 	return error;
   11407 }
   11408 
   11409 /******************************************************************************
   11410  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11411  * register.
   11412  *
   11413  * sc - Struct containing variables accessed by shared code
   11414  * offset - offset of word in the EEPROM to read
   11415  * data - word read from the EEPROM
   11416  * words - number of words to read
   11417  *****************************************************************************/
   11418 static int
   11419 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11420 {
   11421 	int32_t  error = 0;
   11422 	uint32_t flash_bank = 0;
   11423 	uint32_t act_offset = 0;
   11424 	uint32_t bank_offset = 0;
   11425 	uint32_t dword = 0;
   11426 	uint16_t i = 0;
   11427 
   11428 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11429 		device_xname(sc->sc_dev), __func__));
   11430 
   11431 	/*
   11432 	 * We need to know which is the valid flash bank.  In the event
   11433 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11434 	 * managing flash_bank.  So it cannot be trusted and needs
   11435 	 * to be updated with each read.
   11436 	 */
   11437 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11438 	if (error) {
   11439 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11440 			device_xname(sc->sc_dev)));
   11441 		flash_bank = 0;
   11442 	}
   11443 
    11444 	/*
    11445 	 * Adjust the offset appropriately if we're on bank 1, scaling
    11446 	 * by the word size to get a byte offset.
    11447 	 */
   11448 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11449 
   11450 	error = wm_get_swfwhw_semaphore(sc);
   11451 	if (error) {
   11452 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11453 		    __func__);
   11454 		return error;
   11455 	}
   11456 
   11457 	for (i = 0; i < words; i++) {
   11458 		/* The NVM part needs a byte offset, hence * 2 */
   11459 		act_offset = bank_offset + ((offset + i) * 2);
   11460 		/* but we must read dword aligned, so mask ... */
   11461 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11462 		if (error) {
   11463 			aprint_error_dev(sc->sc_dev,
   11464 			    "%s: failed to read NVM\n", __func__);
   11465 			break;
   11466 		}
   11467 		/* ... and pick out low or high word */
   11468 		if ((act_offset & 0x2) == 0)
   11469 			data[i] = (uint16_t)(dword & 0xFFFF);
   11470 		else
   11471 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11472 	}
   11473 
   11474 	wm_put_swfwhw_semaphore(sc);
   11475 	return error;
   11476 }
   11477 
   11478 /* iNVM */
   11479 
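          /*
           * wm_nvm_read_word_invm:
           *
           *	Look up a single word in the iNVM (integrated NVM) by
           *	scanning the WORD_AUTOLOAD records; CSR autoload and RSA
           *	key records are skipped by their fixed sizes.
           */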
   11480 static int
   11481 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11482 {
    11483 	int32_t  rv = -1;	/* "not found" unless the word is seen */
   11484 	uint32_t invm_dword;
   11485 	uint16_t i;
   11486 	uint8_t record_type, word_address;
   11487 
   11488 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11489 		device_xname(sc->sc_dev), __func__));
   11490 
   11491 	for (i = 0; i < INVM_SIZE; i++) {
   11492 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11493 		/* Get record type */
   11494 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11495 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11496 			break;
   11497 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11498 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11499 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11500 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11501 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11502 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11503 			if (word_address == address) {
   11504 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11505 				rv = 0;
   11506 				break;
   11507 			}
   11508 		}
   11509 	}
   11510 
   11511 	return rv;
   11512 }
   11513 
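          /*
           * wm_nvm_read_invm:
           *
           *	Read words from the iNVM (e.g. on I211).  Only a few
           *	well-known words are mapped; for most of them a chip
           *	default value is substituted when the word is not present.
           */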
   11514 static int
   11515 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11516 {
   11517 	int rv = 0;
   11518 	int i;
   11519 
   11520 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11521 		device_xname(sc->sc_dev), __func__));
   11522 
   11523 	for (i = 0; i < words; i++) {
   11524 		switch (offset + i) {
   11525 		case NVM_OFF_MACADDR:
   11526 		case NVM_OFF_MACADDR1:
   11527 		case NVM_OFF_MACADDR2:
   11528 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11529 			if (rv != 0) {
   11530 				data[i] = 0xffff;
   11531 				rv = -1;
   11532 			}
   11533 			break;
   11534 		case NVM_OFF_CFG2:
   11535 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11536 			if (rv != 0) {
   11537 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11538 				rv = 0;
   11539 			}
   11540 			break;
   11541 		case NVM_OFF_CFG4:
   11542 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11543 			if (rv != 0) {
   11544 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11545 				rv = 0;
   11546 			}
   11547 			break;
   11548 		case NVM_OFF_LED_1_CFG:
   11549 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11550 			if (rv != 0) {
   11551 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11552 				rv = 0;
   11553 			}
   11554 			break;
   11555 		case NVM_OFF_LED_0_2_CFG:
   11556 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11557 			if (rv != 0) {
   11558 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11559 				rv = 0;
   11560 			}
   11561 			break;
   11562 		case NVM_OFF_ID_LED_SETTINGS:
   11563 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11564 			if (rv != 0) {
   11565 				*data = ID_LED_RESERVED_FFFF;
   11566 				rv = 0;
   11567 			}
   11568 			break;
   11569 		default:
   11570 			DPRINTF(WM_DEBUG_NVM,
   11571 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11572 			*data = NVM_RESERVED_WORD;
   11573 			break;
   11574 		}
   11575 	}
   11576 
   11577 	return rv;
   11578 }
   11579 
   11580 /* Lock, detecting NVM type, validate checksum, version and read */
   11581 
   11582 /*
   11583  * wm_nvm_acquire:
   11584  *
   11585  *	Perform the EEPROM handshake required on some chips.
   11586  */
   11587 static int
   11588 wm_nvm_acquire(struct wm_softc *sc)
   11589 {
   11590 	uint32_t reg;
   11591 	int x;
   11592 	int ret = 0;
   11593 
   11594 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11595 		device_xname(sc->sc_dev), __func__));
   11596 
   11597 	if (sc->sc_type >= WM_T_ICH8) {
   11598 		ret = wm_get_nvm_ich8lan(sc);
   11599 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11600 		ret = wm_get_swfwhw_semaphore(sc);
   11601 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11602 		/* This will also do wm_get_swsm_semaphore() if needed */
   11603 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11604 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11605 		ret = wm_get_swsm_semaphore(sc);
   11606 	}
   11607 
   11608 	if (ret) {
   11609 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11610 			__func__);
   11611 		return 1;
   11612 	}
   11613 
   11614 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11615 		reg = CSR_READ(sc, WMREG_EECD);
   11616 
   11617 		/* Request EEPROM access. */
   11618 		reg |= EECD_EE_REQ;
   11619 		CSR_WRITE(sc, WMREG_EECD, reg);
   11620 
    11621 		/* ... and wait for it to be granted. */
   11622 		for (x = 0; x < 1000; x++) {
   11623 			reg = CSR_READ(sc, WMREG_EECD);
   11624 			if (reg & EECD_EE_GNT)
   11625 				break;
   11626 			delay(5);
   11627 		}
   11628 		if ((reg & EECD_EE_GNT) == 0) {
   11629 			aprint_error_dev(sc->sc_dev,
   11630 			    "could not acquire EEPROM GNT\n");
   11631 			reg &= ~EECD_EE_REQ;
   11632 			CSR_WRITE(sc, WMREG_EECD, reg);
   11633 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11634 				wm_put_swfwhw_semaphore(sc);
   11635 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11636 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11637 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11638 				wm_put_swsm_semaphore(sc);
   11639 			return 1;
   11640 		}
   11641 	}
   11642 
   11643 	return 0;
   11644 }
   11645 
   11646 /*
   11647  * wm_nvm_release:
   11648  *
   11649  *	Release the EEPROM mutex.
   11650  */
   11651 static void
   11652 wm_nvm_release(struct wm_softc *sc)
   11653 {
   11654 	uint32_t reg;
   11655 
   11656 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11657 		device_xname(sc->sc_dev), __func__));
   11658 
   11659 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11660 		reg = CSR_READ(sc, WMREG_EECD);
   11661 		reg &= ~EECD_EE_REQ;
   11662 		CSR_WRITE(sc, WMREG_EECD, reg);
   11663 	}
   11664 
   11665 	if (sc->sc_type >= WM_T_ICH8) {
   11666 		wm_put_nvm_ich8lan(sc);
   11667 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11668 		wm_put_swfwhw_semaphore(sc);
   11669 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11670 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11671 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11672 		wm_put_swsm_semaphore(sc);
   11673 }
   11674 
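          /*
           * wm_nvm_is_onboard_eeprom:
           *
           *	Return 1 if the NVM is an onboard EEPROM, or 0 if it's
           *	Flash.  Only 82573/82574/82583 may use Flash (EECD bits
           *	15 and 16 both set).
           */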
   11675 static int
   11676 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11677 {
   11678 	uint32_t eecd = 0;
   11679 
   11680 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11681 	    || sc->sc_type == WM_T_82583) {
   11682 		eecd = CSR_READ(sc, WMREG_EECD);
   11683 
   11684 		/* Isolate bits 15 & 16 */
   11685 		eecd = ((eecd >> 15) & 0x03);
   11686 
   11687 		/* If both bits are set, device is Flash type */
   11688 		if (eecd == 0x03)
   11689 			return 0;
   11690 	}
   11691 	return 1;
   11692 }
   11693 
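          /*
           * wm_nvm_get_flash_presence_i210:
           *
           *	Return 1 if an external Flash is detected on I210/I211,
           *	otherwise 0 (iNVM only).
           */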
   11694 static int
   11695 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11696 {
   11697 	uint32_t eec;
   11698 
   11699 	eec = CSR_READ(sc, WMREG_EEC);
   11700 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11701 		return 1;
   11702 
   11703 	return 0;
   11704 }
   11705 
   11706 /*
   11707  * wm_nvm_validate_checksum
   11708  *
   11709  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11710  */
   11711 static int
   11712 wm_nvm_validate_checksum(struct wm_softc *sc)
   11713 {
   11714 	uint16_t checksum;
   11715 	uint16_t eeprom_data;
   11716 #ifdef WM_DEBUG
   11717 	uint16_t csum_wordaddr, valid_checksum;
   11718 #endif
   11719 	int i;
   11720 
   11721 	checksum = 0;
   11722 
   11723 	/* Don't check for I211 */
   11724 	if (sc->sc_type == WM_T_I211)
   11725 		return 0;
   11726 
   11727 #ifdef WM_DEBUG
   11728 	if (sc->sc_type == WM_T_PCH_LPT) {
   11729 		csum_wordaddr = NVM_OFF_COMPAT;
   11730 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11731 	} else {
   11732 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11733 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11734 	}
   11735 
   11736 	/* Dump EEPROM image for debug */
   11737 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11738 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11739 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11740 		/* XXX PCH_SPT? */
   11741 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11742 		if ((eeprom_data & valid_checksum) == 0) {
   11743 			DPRINTF(WM_DEBUG_NVM,
   11744 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11745 				device_xname(sc->sc_dev), eeprom_data,
   11746 				    valid_checksum));
   11747 		}
   11748 	}
   11749 
   11750 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11751 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11752 		for (i = 0; i < NVM_SIZE; i++) {
   11753 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11754 				printf("XXXX ");
   11755 			else
   11756 				printf("%04hx ", eeprom_data);
   11757 			if (i % 8 == 7)
   11758 				printf("\n");
   11759 		}
   11760 	}
   11761 
   11762 #endif /* WM_DEBUG */
   11763 
   11764 	for (i = 0; i < NVM_SIZE; i++) {
   11765 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11766 			return 1;
   11767 		checksum += eeprom_data;
   11768 	}
   11769 
   11770 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11771 #ifdef WM_DEBUG
   11772 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11773 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11774 #endif
   11775 	}
   11776 
   11777 	return 0;
   11778 }
   11779 
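          /*
           * wm_nvm_version_invm:
           *
           *	Decode the NVM version from iNVM word 61 (see the comment
           *	below on why we don't follow Linux's algorithm).
           */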
   11780 static void
   11781 wm_nvm_version_invm(struct wm_softc *sc)
   11782 {
   11783 	uint32_t dword;
   11784 
   11785 	/*
    11786 	 * Linux's code to decode the version is very strange, so we don't
    11787 	 * follow that algorithm and simply use word 61 as documented.
    11788 	 * It may not be perfect, though...
   11789 	 *
   11790 	 * Example:
   11791 	 *
   11792 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11793 	 */
   11794 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11795 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11796 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11797 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11798 }
   11799 
   11800 static void
   11801 wm_nvm_version(struct wm_softc *sc)
   11802 {
   11803 	uint16_t major, minor, build, patch;
   11804 	uint16_t uid0, uid1;
   11805 	uint16_t nvm_data;
   11806 	uint16_t off;
   11807 	bool check_version = false;
   11808 	bool check_optionrom = false;
   11809 	bool have_build = false;
   11810 
   11811 	/*
   11812 	 * Version format:
   11813 	 *
   11814 	 * XYYZ
   11815 	 * X0YZ
   11816 	 * X0YY
   11817 	 *
   11818 	 * Example:
   11819 	 *
   11820 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11821 	 *	82571	0x50a6	5.10.6?
   11822 	 *	82572	0x506a	5.6.10?
   11823 	 *	82572EI	0x5069	5.6.9?
   11824 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11825 	 *		0x2013	2.1.3?
    11826 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   11827 	 */
   11828 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11829 	switch (sc->sc_type) {
   11830 	case WM_T_82571:
   11831 	case WM_T_82572:
   11832 	case WM_T_82574:
   11833 	case WM_T_82583:
   11834 		check_version = true;
   11835 		check_optionrom = true;
   11836 		have_build = true;
   11837 		break;
   11838 	case WM_T_82575:
   11839 	case WM_T_82576:
   11840 	case WM_T_82580:
   11841 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11842 			check_version = true;
   11843 		break;
   11844 	case WM_T_I211:
   11845 		wm_nvm_version_invm(sc);
   11846 		goto printver;
   11847 	case WM_T_I210:
   11848 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11849 			wm_nvm_version_invm(sc);
   11850 			goto printver;
   11851 		}
   11852 		/* FALLTHROUGH */
   11853 	case WM_T_I350:
   11854 	case WM_T_I354:
   11855 		check_version = true;
   11856 		check_optionrom = true;
   11857 		break;
   11858 	default:
   11859 		return;
   11860 	}
   11861 	if (check_version) {
   11862 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11863 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11864 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11865 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11866 			build = nvm_data & NVM_BUILD_MASK;
   11867 			have_build = true;
   11868 		} else
   11869 			minor = nvm_data & 0x00ff;
   11870 
   11871 		/* Decimal */
   11872 		minor = (minor / 16) * 10 + (minor % 16);
   11873 		sc->sc_nvm_ver_major = major;
   11874 		sc->sc_nvm_ver_minor = minor;
   11875 
   11876 printver:
   11877 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11878 		    sc->sc_nvm_ver_minor);
   11879 		if (have_build) {
   11880 			sc->sc_nvm_ver_build = build;
   11881 			aprint_verbose(".%d", build);
   11882 		}
   11883 	}
   11884 	if (check_optionrom) {
   11885 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11886 		/* Option ROM Version */
   11887 		if ((off != 0x0000) && (off != 0xffff)) {
   11888 			off += NVM_COMBO_VER_OFF;
   11889 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11890 			wm_nvm_read(sc, off, 1, &uid0);
   11891 			if ((uid0 != 0) && (uid0 != 0xffff)
   11892 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11893 				/* 16bits */
   11894 				major = uid0 >> 8;
   11895 				build = (uid0 << 8) | (uid1 >> 8);
   11896 				patch = uid1 & 0x00ff;
   11897 				aprint_verbose(", option ROM Version %d.%d.%d",
   11898 				    major, build, patch);
   11899 			}
   11900 		}
   11901 	}
   11902 
   11903 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11904 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11905 }
   11906 
   11907 /*
   11908  * wm_nvm_read:
   11909  *
   11910  *	Read data from the serial EEPROM.
   11911  */
   11912 static int
   11913 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11914 {
   11915 	int rv;
   11916 
   11917 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11918 		device_xname(sc->sc_dev), __func__));
   11919 
   11920 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11921 		return 1;
   11922 
   11923 	if (wm_nvm_acquire(sc))
   11924 		return 1;
   11925 
   11926 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11927 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11928 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11929 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11930 	else if (sc->sc_type == WM_T_PCH_SPT)
   11931 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11932 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11933 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11934 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11935 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11936 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11937 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11938 	else
   11939 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11940 
   11941 	wm_nvm_release(sc);
   11942 	return rv;
   11943 }
   11944 
   11945 /*
   11946  * Hardware semaphores.
    11947  * Very complex...
   11948  */
   11949 
   11950 static int
   11951 wm_get_null(struct wm_softc *sc)
   11952 {
   11953 
   11954 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11955 		device_xname(sc->sc_dev), __func__));
   11956 	return 0;
   11957 }
   11958 
   11959 static void
   11960 wm_put_null(struct wm_softc *sc)
   11961 {
   11962 
   11963 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11964 		device_xname(sc->sc_dev), __func__));
   11965 	return;
   11966 }
   11967 
   11968 /*
   11969  * Get hardware semaphore.
   11970  * Same as e1000_get_hw_semaphore_generic()
   11971  */
   11972 static int
   11973 wm_get_swsm_semaphore(struct wm_softc *sc)
   11974 {
   11975 	int32_t timeout;
   11976 	uint32_t swsm;
   11977 
   11978 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11979 		device_xname(sc->sc_dev), __func__));
   11980 	KASSERT(sc->sc_nvm_wordsize > 0);
   11981 
   11982 	/* Get the SW semaphore. */
   11983 	timeout = sc->sc_nvm_wordsize + 1;
   11984 	while (timeout) {
   11985 		swsm = CSR_READ(sc, WMREG_SWSM);
   11986 
   11987 		if ((swsm & SWSM_SMBI) == 0)
   11988 			break;
   11989 
   11990 		delay(50);
   11991 		timeout--;
   11992 	}
   11993 
   11994 	if (timeout == 0) {
   11995 		aprint_error_dev(sc->sc_dev,
   11996 		    "could not acquire SWSM SMBI\n");
   11997 		return 1;
   11998 	}
   11999 
   12000 	/* Get the FW semaphore. */
   12001 	timeout = sc->sc_nvm_wordsize + 1;
   12002 	while (timeout) {
   12003 		swsm = CSR_READ(sc, WMREG_SWSM);
   12004 		swsm |= SWSM_SWESMBI;
   12005 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12006 		/* If we managed to set the bit we got the semaphore. */
   12007 		swsm = CSR_READ(sc, WMREG_SWSM);
   12008 		if (swsm & SWSM_SWESMBI)
   12009 			break;
   12010 
   12011 		delay(50);
   12012 		timeout--;
   12013 	}
   12014 
   12015 	if (timeout == 0) {
   12016 		aprint_error_dev(sc->sc_dev,
   12017 		    "could not acquire SWSM SWESMBI\n");
   12018 		/* Release semaphores */
   12019 		wm_put_swsm_semaphore(sc);
   12020 		return 1;
   12021 	}
   12022 	return 0;
   12023 }
   12024 
   12025 /*
   12026  * Put hardware semaphore.
   12027  * Same as e1000_put_hw_semaphore_generic()
   12028  */
   12029 static void
   12030 wm_put_swsm_semaphore(struct wm_softc *sc)
   12031 {
   12032 	uint32_t swsm;
   12033 
   12034 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12035 		device_xname(sc->sc_dev), __func__));
   12036 
   12037 	swsm = CSR_READ(sc, WMREG_SWSM);
   12038 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12039 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12040 }
   12041 
   12042 /*
   12043  * Get SW/FW semaphore.
   12044  * Same as e1000_acquire_swfw_sync_82575().
   12045  */
   12046 static int
   12047 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12048 {
   12049 	uint32_t swfw_sync;
   12050 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12051 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12052 	int timeout = 200;
   12053 
   12054 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12055 		device_xname(sc->sc_dev), __func__));
   12056 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12057 
   12058 	for (timeout = 0; timeout < 200; timeout++) {
   12059 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12060 			if (wm_get_swsm_semaphore(sc)) {
   12061 				aprint_error_dev(sc->sc_dev,
   12062 				    "%s: failed to get semaphore\n",
   12063 				    __func__);
   12064 				return 1;
   12065 			}
   12066 		}
   12067 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12068 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12069 			swfw_sync |= swmask;
   12070 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12071 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12072 				wm_put_swsm_semaphore(sc);
   12073 			return 0;
   12074 		}
   12075 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12076 			wm_put_swsm_semaphore(sc);
   12077 		delay(5000);
   12078 	}
   12079 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12080 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12081 	return 1;
   12082 }
   12083 
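          /*
           * Put SW/FW semaphore.
           * The counterpart of wm_get_swfw_semaphore().
           */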
   12084 static void
   12085 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12086 {
   12087 	uint32_t swfw_sync;
   12088 
   12089 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12090 		device_xname(sc->sc_dev), __func__));
   12091 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12092 
   12093 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12094 		while (wm_get_swsm_semaphore(sc) != 0)
   12095 			continue;
   12096 	}
   12097 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12098 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12099 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12100 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12101 		wm_put_swsm_semaphore(sc);
   12102 }
   12103 
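          /*
           * Get the PHY semaphore of this function (82575 and newer).
           * wm_put_phy_82575() below is the counterpart.
           */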
   12104 static int
   12105 wm_get_phy_82575(struct wm_softc *sc)
   12106 {
   12107 
   12108 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12109 		device_xname(sc->sc_dev), __func__));
   12110 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12111 }
   12112 
   12113 static void
   12114 wm_put_phy_82575(struct wm_softc *sc)
   12115 {
   12116 
   12117 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12118 		device_xname(sc->sc_dev), __func__));
   12119 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12120 }
   12121 
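          /*
           * Get SW/FW/HW semaphore (the EXTCNFCTR MDIO ownership bit).
           * Protects both PHY and NVM access on ICH8 variants.
           */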
   12122 static int
   12123 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12124 {
   12125 	uint32_t ext_ctrl;
   12126 	int timeout = 200;
   12127 
   12128 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12129 		device_xname(sc->sc_dev), __func__));
   12130 
   12131 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12132 	for (timeout = 0; timeout < 200; timeout++) {
   12133 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12134 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12135 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12136 
   12137 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12138 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12139 			return 0;
   12140 		delay(5000);
   12141 	}
   12142 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12143 	    device_xname(sc->sc_dev), ext_ctrl);
   12144 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12145 	return 1;
   12146 }
   12147 
   12148 static void
   12149 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12150 {
   12151 	uint32_t ext_ctrl;
   12152 
   12153 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12154 		device_xname(sc->sc_dev), __func__));
   12155 
   12156 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12157 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12158 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12159 
   12160 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12161 }
   12162 
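          /*
           * Get the SW flag (EXTCNFCTR MDIO ownership) on ICH8 and newer,
           * first waiting for any other owner to release it.
           */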
   12163 static int
   12164 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12165 {
   12166 	uint32_t ext_ctrl;
   12167 	int timeout;
   12168 
   12169 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12170 		device_xname(sc->sc_dev), __func__));
   12171 	mutex_enter(sc->sc_ich_phymtx);
   12172 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12173 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12174 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12175 			break;
   12176 		delay(1000);
   12177 	}
   12178 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12179 		printf("%s: SW has already locked the resource\n",
   12180 		    device_xname(sc->sc_dev));
   12181 		goto out;
   12182 	}
   12183 
   12184 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12185 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12186 	for (timeout = 0; timeout < 1000; timeout++) {
   12187 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12188 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12189 			break;
   12190 		delay(1000);
   12191 	}
   12192 	if (timeout >= 1000) {
   12193 		printf("%s: failed to acquire semaphore\n",
   12194 		    device_xname(sc->sc_dev));
   12195 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12196 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12197 		goto out;
   12198 	}
   12199 	return 0;
   12200 
   12201 out:
   12202 	mutex_exit(sc->sc_ich_phymtx);
   12203 	return 1;
   12204 }
   12205 
   12206 static void
   12207 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12208 {
   12209 	uint32_t ext_ctrl;
   12210 
   12211 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12212 		device_xname(sc->sc_dev), __func__));
   12213 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12214 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12215 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12216 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12217 	} else {
   12218 		printf("%s: Semaphore unexpectedly released\n",
   12219 		    device_xname(sc->sc_dev));
   12220 	}
   12221 
   12222 	mutex_exit(sc->sc_ich_phymtx);
   12223 }
   12224 
   12225 static int
   12226 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12227 {
   12228 
   12229 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12230 		device_xname(sc->sc_dev), __func__));
   12231 	mutex_enter(sc->sc_ich_nvmmtx);
   12232 
   12233 	return 0;
   12234 }
   12235 
   12236 static void
   12237 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12238 {
   12239 
   12240 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12241 		device_xname(sc->sc_dev), __func__));
   12242 	mutex_exit(sc->sc_ich_nvmmtx);
   12243 }
   12244 
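          /*
           * Get the HW semaphore (EXTCNFCTR MDIO ownership) on 82573 and
           * friends, retrying up to WM_MDIO_OWNERSHIP_TIMEOUT times.
           */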
   12245 static int
   12246 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12247 {
   12248 	int i = 0;
   12249 	uint32_t reg;
   12250 
   12251 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12252 		device_xname(sc->sc_dev), __func__));
   12253 
   12254 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12255 	do {
   12256 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12257 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12258 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12259 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12260 			break;
   12261 		delay(2*1000);
   12262 		i++;
   12263 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12264 
   12265 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12266 		wm_put_hw_semaphore_82573(sc);
   12267 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12268 		    device_xname(sc->sc_dev));
   12269 		return -1;
   12270 	}
   12271 
   12272 	return 0;
   12273 }
   12274 
   12275 static void
   12276 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12277 {
   12278 	uint32_t reg;
   12279 
   12280 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12281 		device_xname(sc->sc_dev), __func__));
   12282 
   12283 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12284 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12285 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12286 }
   12287 
   12288 /*
   12289  * Management mode and power management related subroutines.
   12290  * BMC, AMT, suspend/resume and EEE.
   12291  */
   12292 
   12293 #ifdef WM_WOL
   12294 static int
   12295 wm_check_mng_mode(struct wm_softc *sc)
   12296 {
   12297 	int rv;
   12298 
   12299 	switch (sc->sc_type) {
   12300 	case WM_T_ICH8:
   12301 	case WM_T_ICH9:
   12302 	case WM_T_ICH10:
   12303 	case WM_T_PCH:
   12304 	case WM_T_PCH2:
   12305 	case WM_T_PCH_LPT:
   12306 	case WM_T_PCH_SPT:
   12307 		rv = wm_check_mng_mode_ich8lan(sc);
   12308 		break;
   12309 	case WM_T_82574:
   12310 	case WM_T_82583:
   12311 		rv = wm_check_mng_mode_82574(sc);
   12312 		break;
   12313 	case WM_T_82571:
   12314 	case WM_T_82572:
   12315 	case WM_T_82573:
   12316 	case WM_T_80003:
   12317 		rv = wm_check_mng_mode_generic(sc);
   12318 		break;
   12319 	default:
    12320 		/* nothing to do */
   12321 		rv = 0;
   12322 		break;
   12323 	}
   12324 
   12325 	return rv;
   12326 }
   12327 
   12328 static int
   12329 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12330 {
   12331 	uint32_t fwsm;
   12332 
   12333 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12334 
   12335 	if (((fwsm & FWSM_FW_VALID) != 0)
   12336 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12337 		return 1;
   12338 
   12339 	return 0;
   12340 }
   12341 
   12342 static int
   12343 wm_check_mng_mode_82574(struct wm_softc *sc)
   12344 {
   12345 	uint16_t data;
   12346 
   12347 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12348 
   12349 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12350 		return 1;
   12351 
   12352 	return 0;
   12353 }
   12354 
   12355 static int
   12356 wm_check_mng_mode_generic(struct wm_softc *sc)
   12357 {
   12358 	uint32_t fwsm;
   12359 
   12360 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12361 
   12362 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12363 		return 1;
   12364 
   12365 	return 0;
   12366 }
   12367 #endif /* WM_WOL */
   12368 
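          /*
           * Return 1 if the management engine may pass received packets
           * through to the host (TCO reception enabled in pass-through
           * mode), otherwise 0.
           */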
   12369 static int
   12370 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12371 {
   12372 	uint32_t manc, fwsm, factps;
   12373 
   12374 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12375 		return 0;
   12376 
   12377 	manc = CSR_READ(sc, WMREG_MANC);
   12378 
   12379 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12380 		device_xname(sc->sc_dev), manc));
   12381 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12382 		return 0;
   12383 
   12384 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12385 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12386 		factps = CSR_READ(sc, WMREG_FACTPS);
   12387 		if (((factps & FACTPS_MNGCG) == 0)
   12388 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12389 			return 1;
   12390 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   12391 		uint16_t data;
   12392 
   12393 		factps = CSR_READ(sc, WMREG_FACTPS);
   12394 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12395 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12396 			device_xname(sc->sc_dev), factps, data));
   12397 		if (((factps & FACTPS_MNGCG) == 0)
   12398 		    && ((data & NVM_CFG2_MNGM_MASK)
   12399 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12400 			return 1;
   12401 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12402 	    && ((manc & MANC_ASF_EN) == 0))
   12403 		return 1;
   12404 
   12405 	return 0;
   12406 }
   12407 
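          /*
           * Return true if firmware currently blocks PHY resets
           * (FWSM_RSPCIPHY on ICH/PCH, MANC_BLK_PHY_RST_ON_IDE on 82571
           * and friends).
           */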
   12408 static bool
   12409 wm_phy_resetisblocked(struct wm_softc *sc)
   12410 {
   12411 	bool blocked = false;
   12412 	uint32_t reg;
   12413 	int i = 0;
   12414 
   12415 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12416 		device_xname(sc->sc_dev), __func__));
   12417 
   12418 	switch (sc->sc_type) {
   12419 	case WM_T_ICH8:
   12420 	case WM_T_ICH9:
   12421 	case WM_T_ICH10:
   12422 	case WM_T_PCH:
   12423 	case WM_T_PCH2:
   12424 	case WM_T_PCH_LPT:
   12425 	case WM_T_PCH_SPT:
   12426 		do {
   12427 			reg = CSR_READ(sc, WMREG_FWSM);
   12428 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12429 				blocked = true;
   12430 				delay(10*1000);
   12431 				continue;
   12432 			}
   12433 			blocked = false;
   12434 		} while (blocked && (i++ < 30));
    12435 		return blocked;
   12437 	case WM_T_82571:
   12438 	case WM_T_82572:
   12439 	case WM_T_82573:
   12440 	case WM_T_82574:
   12441 	case WM_T_82583:
   12442 	case WM_T_80003:
    12443 		reg = CSR_READ(sc, WMREG_MANC);
    12444 		return (reg & MANC_BLK_PHY_RST_ON_IDE) != 0;
   12449 	default:
   12450 		/* no problem */
   12451 		break;
   12452 	}
   12453 
   12454 	return false;
   12455 }
   12456 
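          /*
           * Tell the firmware that the driver has taken over the device:
           * SWSM_DRV_LOAD on 82573, CTRL_EXT_DRV_LOAD on other >= 82571
           * chips.  wm_release_hw_control() undoes this.
           */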
   12457 static void
   12458 wm_get_hw_control(struct wm_softc *sc)
   12459 {
   12460 	uint32_t reg;
   12461 
   12462 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12463 		device_xname(sc->sc_dev), __func__));
   12464 
   12465 	if (sc->sc_type == WM_T_82573) {
   12466 		reg = CSR_READ(sc, WMREG_SWSM);
   12467 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12468 	} else if (sc->sc_type >= WM_T_82571) {
   12469 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12470 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12471 	}
   12472 }
   12473 
   12474 static void
   12475 wm_release_hw_control(struct wm_softc *sc)
   12476 {
   12477 	uint32_t reg;
   12478 
   12479 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12480 		device_xname(sc->sc_dev), __func__));
   12481 
   12482 	if (sc->sc_type == WM_T_82573) {
   12483 		reg = CSR_READ(sc, WMREG_SWSM);
   12484 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12485 	} else if (sc->sc_type >= WM_T_82571) {
   12486 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12487 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12488 	}
   12489 }
   12490 
   12491 static void
   12492 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12493 {
   12494 	uint32_t reg;
   12495 
   12496 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12497 		device_xname(sc->sc_dev), __func__));
   12498 
   12499 	if (sc->sc_type < WM_T_PCH2)
   12500 		return;
   12501 
   12502 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12503 
   12504 	if (gate)
   12505 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12506 	else
   12507 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12508 
   12509 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12510 }
   12511 
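          /*
           * Switch the PHY access from SMBus mode back to the normal PCIe
           * (MDIO) mode, toggling LANPHYPC if the PHY isn't accessible.
           * Used on PCH and newer.
           */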
   12512 static void
   12513 wm_smbustopci(struct wm_softc *sc)
   12514 {
   12515 	uint32_t fwsm, reg;
   12516 	int rv = 0;
   12517 
   12518 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12519 		device_xname(sc->sc_dev), __func__));
   12520 
   12521 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12522 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12523 
   12524 	/* Disable ULP */
   12525 	wm_ulp_disable(sc);
   12526 
   12527 	/* Acquire PHY semaphore */
   12528 	sc->phy.acquire(sc);
   12529 
   12530 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12531 	switch (sc->sc_type) {
   12532 	case WM_T_PCH_LPT:
   12533 	case WM_T_PCH_SPT:
   12534 		if (wm_phy_is_accessible_pchlan(sc))
   12535 			break;
   12536 
   12537 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12538 		reg |= CTRL_EXT_FORCE_SMBUS;
   12539 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12540 #if 0
   12541 		/* XXX Isn't this required??? */
   12542 		CSR_WRITE_FLUSH(sc);
   12543 #endif
   12544 		delay(50 * 1000);
   12545 		/* FALLTHROUGH */
   12546 	case WM_T_PCH2:
   12547 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12548 			break;
   12549 		/* FALLTHROUGH */
   12550 	case WM_T_PCH:
   12551 		if (sc->sc_type == WM_T_PCH)
   12552 			if ((fwsm & FWSM_FW_VALID) != 0)
   12553 				break;
   12554 
   12555 		if (wm_phy_resetisblocked(sc) == true) {
   12556 			printf("XXX reset is blocked(3)\n");
   12557 			break;
   12558 		}
   12559 
   12560 		wm_toggle_lanphypc_pch_lpt(sc);
   12561 
   12562 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12563 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12564 				break;
   12565 
   12566 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12567 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12568 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12569 
   12570 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12571 				break;
   12572 			rv = -1;
   12573 		}
   12574 		break;
   12575 	default:
   12576 		break;
   12577 	}
   12578 
   12579 	/* Release semaphore */
   12580 	sc->phy.release(sc);
   12581 
   12582 	if (rv == 0) {
   12583 		if (wm_phy_resetisblocked(sc)) {
   12584 			printf("XXX reset is blocked(4)\n");
   12585 			goto out;
   12586 		}
   12587 		wm_reset_phy(sc);
   12588 		if (wm_phy_resetisblocked(sc))
    12589 			printf("XXX reset is blocked(5)\n");
   12590 	}
   12591 
   12592 out:
   12593 	/*
   12594 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12595 	 */
   12596 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12597 		delay(10*1000);
   12598 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12599 	}
   12600 }
   12601 
   12602 static void
   12603 wm_init_manageability(struct wm_softc *sc)
   12604 {
   12605 
   12606 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12607 		device_xname(sc->sc_dev), __func__));
   12608 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12609 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12610 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12611 
   12612 		/* Disable hardware interception of ARP */
   12613 		manc &= ~MANC_ARP_EN;
   12614 
   12615 		/* Enable receiving management packets to the host */
   12616 		if (sc->sc_type >= WM_T_82571) {
   12617 			manc |= MANC_EN_MNG2HOST;
    12618 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12619 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12620 		}
   12621 
   12622 		CSR_WRITE(sc, WMREG_MANC, manc);
   12623 	}
   12624 }
   12625 
   12626 static void
   12627 wm_release_manageability(struct wm_softc *sc)
   12628 {
   12629 
   12630 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12631 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12632 
   12633 		manc |= MANC_ARP_EN;
   12634 		if (sc->sc_type >= WM_T_82571)
   12635 			manc &= ~MANC_EN_MNG2HOST;
   12636 
   12637 		CSR_WRITE(sc, WMREG_MANC, manc);
   12638 	}
   12639 }
   12640 
   12641 static void
   12642 wm_get_wakeup(struct wm_softc *sc)
   12643 {
   12644 
   12645 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12646 	switch (sc->sc_type) {
   12647 	case WM_T_82573:
   12648 	case WM_T_82583:
   12649 		sc->sc_flags |= WM_F_HAS_AMT;
   12650 		/* FALLTHROUGH */
   12651 	case WM_T_80003:
   12652 	case WM_T_82575:
   12653 	case WM_T_82576:
   12654 	case WM_T_82580:
   12655 	case WM_T_I350:
   12656 	case WM_T_I354:
   12657 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12658 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12659 		/* FALLTHROUGH */
   12660 	case WM_T_82541:
   12661 	case WM_T_82541_2:
   12662 	case WM_T_82547:
   12663 	case WM_T_82547_2:
   12664 	case WM_T_82571:
   12665 	case WM_T_82572:
   12666 	case WM_T_82574:
   12667 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12668 		break;
   12669 	case WM_T_ICH8:
   12670 	case WM_T_ICH9:
   12671 	case WM_T_ICH10:
   12672 	case WM_T_PCH:
   12673 	case WM_T_PCH2:
   12674 	case WM_T_PCH_LPT:
   12675 	case WM_T_PCH_SPT:
   12676 		sc->sc_flags |= WM_F_HAS_AMT;
   12677 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12678 		break;
   12679 	default:
   12680 		break;
   12681 	}
   12682 
   12683 	/* 1: HAS_MANAGE */
   12684 	if (wm_enable_mng_pass_thru(sc) != 0)
   12685 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12686 
   12687 #ifdef WM_DEBUG
   12688 	printf("\n");
   12689 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12690 		printf("HAS_AMT,");
   12691 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12692 		printf("ARC_SUBSYS_VALID,");
   12693 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12694 		printf("ASF_FIRMWARE_PRES,");
   12695 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12696 		printf("HAS_MANAGE,");
   12697 	printf("\n");
   12698 #endif
    12699 	/*
    12700 	 * Note that the WOL flag is set only after the EEPROM reset
    12701 	 * code has run.
    12702 	 */
   12703 }
   12704 
   12705 /*
   12706  * Unconfigure Ultra Low Power mode.
    12707  * Only for PCH_LPT and newer, except the device IDs excluded below.
   12708  */
   12709 static void
   12710 wm_ulp_disable(struct wm_softc *sc)
   12711 {
   12712 	uint32_t reg;
   12713 	int i = 0;
   12714 
   12715 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12716 		device_xname(sc->sc_dev), __func__));
   12717 	/* Exclude old devices */
   12718 	if ((sc->sc_type < WM_T_PCH_LPT)
   12719 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12720 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12721 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   12722 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   12723 		return;
   12724 
   12725 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   12726 		/* Request ME un-configure ULP mode in the PHY */
   12727 		reg = CSR_READ(sc, WMREG_H2ME);
   12728 		reg &= ~H2ME_ULP;
   12729 		reg |= H2ME_ENFORCE_SETTINGS;
   12730 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12731 
   12732 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   12733 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   12734 			if (i++ == 30) {
   12735 				printf("%s timed out\n", __func__);
   12736 				return;
   12737 			}
   12738 			delay(10 * 1000);
   12739 		}
   12740 		reg = CSR_READ(sc, WMREG_H2ME);
   12741 		reg &= ~H2ME_ENFORCE_SETTINGS;
   12742 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12743 
   12744 		return;
   12745 	}
   12746 
   12747 	/* Acquire semaphore */
   12748 	sc->phy.acquire(sc);
   12749 
   12750 	/* Toggle LANPHYPC */
   12751 	wm_toggle_lanphypc_pch_lpt(sc);
   12752 
   12753 	/* Unforce SMBus mode in PHY */
   12754 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12755 	if (reg == 0x0000 || reg == 0xffff) {
   12756 		uint32_t reg2;
   12757 
   12758 		printf("%s: Force SMBus first.\n", __func__);
   12759 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   12760 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   12761 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   12762 		delay(50 * 1000);
   12763 
   12764 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12765 	}
   12766 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12767 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   12768 
   12769 	/* Unforce SMBus mode in MAC */
   12770 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12771 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   12772 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12773 
   12774 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   12775 	reg |= HV_PM_CTRL_K1_ENA;
   12776 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   12777 
   12778 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   12779 	reg &= ~(I218_ULP_CONFIG1_IND
   12780 	    | I218_ULP_CONFIG1_STICKY_ULP
   12781 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   12782 	    | I218_ULP_CONFIG1_WOL_HOST
   12783 	    | I218_ULP_CONFIG1_INBAND_EXIT
   12784 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   12785 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   12786 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   12787 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12788 	reg |= I218_ULP_CONFIG1_START;
   12789 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12790 
   12791 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   12792 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   12793 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   12794 
   12795 	/* Release semaphore */
   12796 	sc->phy.release(sc);
   12797 	wm_gmii_reset(sc);
   12798 	delay(50 * 1000);
   12799 }
   12800 
   12801 /* WOL in the newer chipset interfaces (pchlan) */
   12802 static void
   12803 wm_enable_phy_wakeup(struct wm_softc *sc)
   12804 {
   12805 #if 0
   12806 	uint16_t preg;
   12807 
   12808 	/* Copy MAC RARs to PHY RARs */
   12809 
   12810 	/* Copy MAC MTA to PHY MTA */
   12811 
   12812 	/* Configure PHY Rx Control register */
   12813 
   12814 	/* Enable PHY wakeup in MAC register */
   12815 
   12816 	/* Configure and enable PHY wakeup in PHY registers */
   12817 
   12818 	/* Activate PHY wakeup */
   12819 
   12820 	/* XXX */
   12821 #endif
   12822 }
   12823 
   12824 /* Power down workaround on D3 */
   12825 static void
   12826 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12827 {
   12828 	uint32_t reg;
   12829 	int i;
   12830 
   12831 	for (i = 0; i < 2; i++) {
   12832 		/* Disable link */
   12833 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12834 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12835 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12836 
   12837 		/*
   12838 		 * Call gig speed drop workaround on Gig disable before
   12839 		 * accessing any PHY registers
   12840 		 */
   12841 		if (sc->sc_type == WM_T_ICH8)
   12842 			wm_gig_downshift_workaround_ich8lan(sc);
   12843 
   12844 		/* Write VR power-down enable */
   12845 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12846 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12847 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12848 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12849 
   12850 		/* Read it back and test */
   12851 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12852 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12853 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12854 			break;
   12855 
   12856 		/* Issue PHY reset and repeat at most one more time */
   12857 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12858 	}
   12859 }
   12860 
   12861 static void
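          /*
           * Enable WOL: advertise the wakeup capability, program the wakeup
           * filter, apply the chip specific workarounds and request PME in
           * the PCI power management registers.
           */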
   12862 wm_enable_wakeup(struct wm_softc *sc)
   12863 {
   12864 	uint32_t reg, pmreg;
   12865 	pcireg_t pmode;
   12866 
   12867 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12868 		device_xname(sc->sc_dev), __func__));
   12869 
   12870 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12871 		&pmreg, NULL) == 0)
   12872 		return;
   12873 
   12874 	/* Advertise the wakeup capability */
   12875 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12876 	    | CTRL_SWDPIN(3));
   12877 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12878 
   12879 	/* ICH workaround */
   12880 	switch (sc->sc_type) {
   12881 	case WM_T_ICH8:
   12882 	case WM_T_ICH9:
   12883 	case WM_T_ICH10:
   12884 	case WM_T_PCH:
   12885 	case WM_T_PCH2:
   12886 	case WM_T_PCH_LPT:
   12887 	case WM_T_PCH_SPT:
   12888 		/* Disable gig during WOL */
   12889 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12890 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12891 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12892 		if (sc->sc_type == WM_T_PCH)
   12893 			wm_gmii_reset(sc);
   12894 
   12895 		/* Power down workaround */
   12896 		if (sc->sc_phytype == WMPHY_82577) {
   12897 			struct mii_softc *child;
   12898 
   12899 			/* Assume that the PHY is copper */
   12900 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12901 			if (child->mii_mpd_rev <= 2)
   12902 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12903 				    (768 << 5) | 25, 0x0444); /* magic num */
   12904 		}
   12905 		break;
   12906 	default:
   12907 		break;
   12908 	}
   12909 
   12910 	/* Keep the laser running on fiber adapters */
   12911 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12912 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12913 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12914 		reg |= CTRL_EXT_SWDPIN(3);
   12915 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12916 	}
   12917 
   12918 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12919 #if 0	/* for the multicast packet */
   12920 	reg |= WUFC_MC;
   12921 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12922 #endif
   12923 
   12924 	if (sc->sc_type >= WM_T_PCH)
   12925 		wm_enable_phy_wakeup(sc);
   12926 	else {
   12927 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   12928 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12929 	}
   12930 
   12931 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12932 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12933 		|| (sc->sc_type == WM_T_PCH2))
   12934 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12935 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12936 
   12937 	/* Request PME */
   12938 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12939 #if 0
   12940 	/* Disable WOL */
   12941 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12942 #else
   12943 	/* For WOL */
   12944 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12945 #endif
   12946 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12947 }
   12948 
   12949 /* LPLU */
   12950 
   12951 static void
   12952 wm_lplu_d0_disable(struct wm_softc *sc)
   12953 {
   12954 	uint32_t reg;
   12955 
   12956 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12957 		device_xname(sc->sc_dev), __func__));
   12958 
   12959 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12960 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12961 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12962 }
   12963 
   12964 static void
   12965 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12966 {
   12967 	uint32_t reg;
   12968 
   12969 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12970 		device_xname(sc->sc_dev), __func__));
   12971 
   12972 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12973 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12974 	reg |= HV_OEM_BITS_ANEGNOW;
   12975 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12976 }
   12977 
   12978 /* EEE */
   12979 
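          /*
           * Enable or disable Energy Efficient Ethernet on I350 and
           * friends, depending on the WM_F_EEE flag.
           */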
   12980 static void
   12981 wm_set_eee_i350(struct wm_softc *sc)
   12982 {
   12983 	uint32_t ipcnfg, eeer;
   12984 
   12985 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12986 	eeer = CSR_READ(sc, WMREG_EEER);
   12987 
   12988 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12989 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12990 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12991 		    | EEER_LPI_FC);
   12992 	} else {
   12993 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12994 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12995 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12996 		    | EEER_LPI_FC);
   12997 	}
   12998 
   12999 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13000 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13001 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13002 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13003 }
   13004 
   13005 /*
   13006  * Workarounds (mainly PHY related).
   13007  * Basically, PHY's workarounds are in the PHY drivers.
   13008  */
   13009 
   13010 /* Work-around for 82566 Kumeran PCS lock loss */
   13011 static void
   13012 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13013 {
   13014 #if 0
   13015 	int miistatus, active, i;
   13016 	int reg;
   13017 
   13018 	miistatus = sc->sc_mii.mii_media_status;
   13019 
   13020 	/* If the link is not up, do nothing */
   13021 	if ((miistatus & IFM_ACTIVE) == 0)
   13022 		return;
   13023 
   13024 	active = sc->sc_mii.mii_media_active;
   13025 
   13026 	/* Nothing to do if the link is other than 1Gbps */
   13027 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13028 		return;
   13029 
   13030 	for (i = 0; i < 10; i++) {
   13031 		/* read twice */
   13032 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13033 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13034 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13035 			goto out;	/* GOOD! */
   13036 
   13037 		/* Reset the PHY */
   13038 		wm_gmii_reset(sc);
   13039 		delay(5*1000);
   13040 	}
   13041 
   13042 	/* Disable GigE link negotiation */
   13043 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13044 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13045 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13046 
   13047 	/*
   13048 	 * Call gig speed drop workaround on Gig disable before accessing
   13049 	 * any PHY registers.
   13050 	 */
   13051 	wm_gig_downshift_workaround_ich8lan(sc);
   13052 
   13053 out:
   13054 	return;
   13055 #endif
   13056 }
   13057 
    13058 /* Workaround for the "WOL from S5 stops working" issue */
   13059 static void
   13060 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13061 {
   13062 	uint16_t kmrn_reg;
   13063 
   13064 	/* Only for igp3 */
   13065 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13066 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13067 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13068 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13069 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13070 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13071 	}
   13072 }
   13073 
   13074 /*
   13075  * Workaround for pch's PHYs
   13076  * XXX should be moved to new PHY driver?
   13077  */
   13078 static void
   13079 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13080 {
   13081 
   13082 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13083 		device_xname(sc->sc_dev), __func__));
   13084 	KASSERT(sc->sc_type == WM_T_PCH);
   13085 
   13086 	if (sc->sc_phytype == WMPHY_82577)
   13087 		wm_set_mdio_slow_mode_hv(sc);
   13088 
    13089 	/* XXX unimplemented: (PCH rev. 2) && (82577 && (phy rev 2 or 3)) */
    13090 
    13091 	/* XXX unimplemented: (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   13092 
   13093 	/* 82578 */
   13094 	if (sc->sc_phytype == WMPHY_82578) {
   13095 		struct mii_softc *child;
   13096 
   13097 		/*
   13098 		 * Return registers to default by doing a soft reset then
   13099 		 * writing 0x3140 to the control register
   13100 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13101 		 */
   13102 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13103 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13104 			PHY_RESET(child);
   13105 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13106 			    0x3140);
   13107 		}
   13108 	}
   13109 
    13110 	/* Select page 0 */
    13111 	if (sc->phy.acquire(sc) == 0) {
    13112 		wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
    13113 		sc->phy.release(sc);
          	}
   13114 
    13115 	/*
    13116 	 * Configure the K1 Si workaround during PHY reset, assuming there
    13117 	 * is link, so that K1 is disabled while the link runs at 1Gbps.
    13118 	 */
   13119 	wm_k1_gig_workaround_hv(sc, 1);
   13120 }
   13121 
   13122 static void
   13123 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13124 {
   13125 
   13126 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13127 		device_xname(sc->sc_dev), __func__));
   13128 	KASSERT(sc->sc_type == WM_T_PCH2);
   13129 
   13130 	wm_set_mdio_slow_mode_hv(sc);
   13131 }
   13132 
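          /*
           * K1 is a power-saving state of the Kumeran interface.  K1 must be
           * off while the link runs at 1Gbps, so this forces it off whenever
           * the caller reports link up, and restores the NVM default when the
           * link is down.
           */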
   13133 static int
   13134 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13135 {
   13136 	int k1_enable = sc->sc_nvm_k1_enabled;
   13137 
   13138 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13139 		device_xname(sc->sc_dev), __func__));
   13140 
   13141 	if (sc->phy.acquire(sc) != 0)
   13142 		return -1;
   13143 
   13144 	if (link) {
   13145 		k1_enable = 0;
   13146 
   13147 		/* Link stall fix for link up */
    13148 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
          		    0x0100);
   13149 	} else {
   13150 		/* Link stall fix for link down */
    13151 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
          		    0x4100);
   13152 	}
   13153 
   13154 	wm_configure_k1_ich8lan(sc, k1_enable);
   13155 	sc->phy.release(sc);
   13156 
   13157 	return 0;
   13158 }
   13159 
   13160 static void
   13161 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13162 {
   13163 	uint32_t reg;
   13164 
   13165 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13166 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13167 	    reg | HV_KMRN_MDIO_SLOW);
   13168 }
   13169 
   13170 static void
   13171 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13172 {
   13173 	uint32_t ctrl, ctrl_ext, tmp;
   13174 	uint16_t kmrn_reg;
   13175 
   13176 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13177 
   13178 	if (k1_enable)
   13179 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13180 	else
   13181 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13182 
   13183 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13184 
   13185 	delay(20);
   13186 
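          	/*
          	 * Briefly force the MAC speed setting with speed-detection
          	 * bypass so that the new K1 configuration takes effect, then
          	 * restore the original CTRL and CTRL_EXT values.
          	 */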
   13187 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13188 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13189 
   13190 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13191 	tmp |= CTRL_FRCSPD;
   13192 
   13193 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13194 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13195 	CSR_WRITE_FLUSH(sc);
   13196 	delay(20);
   13197 
   13198 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13199 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13200 	CSR_WRITE_FLUSH(sc);
   13201 	delay(20);
   13202 }
   13203 
    13204 /* Special case - the 82575 needs manual initialization ... */
   13205 static void
   13206 wm_reset_init_script_82575(struct wm_softc *sc)
   13207 {
    13208 	/*
    13209 	 * Remark: this is untested code - we have no board without an EEPROM.
    13210 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    13211 	 */
   13212 
   13213 	/* SerDes configuration via SERDESCTRL */
   13214 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13215 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13216 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13217 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13218 
   13219 	/* CCM configuration via CCMCTL register */
   13220 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13221 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13222 
   13223 	/* PCIe lanes configuration */
   13224 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13225 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13226 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13227 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13228 
   13229 	/* PCIe PLL Configuration */
   13230 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13231 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13232 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13233 }
   13234 
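          /*
           * Re-apply the MDIO routing (external/shared MDIO) bits of MDICNFG
           * from the per-port CFG3 NVM word after a reset.  Only relevant in
           * SGMII mode, where an external PHY may be in use.
           */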
   13235 static void
   13236 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13237 {
   13238 	uint32_t reg;
   13239 	uint16_t nvmword;
   13240 	int rv;
   13241 
   13242 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13243 		return;
   13244 
   13245 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13246 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13247 	if (rv != 0) {
   13248 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13249 		    __func__);
   13250 		return;
   13251 	}
   13252 
   13253 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13254 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13255 		reg |= MDICNFG_DEST;
   13256 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13257 		reg |= MDICNFG_COM_MDIO;
   13258 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13259 }
   13260 
   13261 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13262 
   13263 static bool
   13264 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13265 {
   13266 	int i;
   13267 	uint32_t reg;
   13268 	uint16_t id1, id2;
   13269 
   13270 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13271 		device_xname(sc->sc_dev), __func__));
   13272 	id1 = id2 = 0xffff;
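          	/* Retry once: the first PHY access may return an invalid ID */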
   13273 	for (i = 0; i < 2; i++) {
   13274 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13275 		if (MII_INVALIDID(id1))
   13276 			continue;
   13277 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13278 		if (MII_INVALIDID(id2))
   13279 			continue;
   13280 		break;
   13281 	}
    13282 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    13283 		goto out;
   13285 
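          	/*
          	 * On PCH/PCH2, retry the ID read in MDIO slow mode.  The lock
          	 * is dropped here because the non-_locked readreg functions
          	 * acquire it themselves.
          	 */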
   13286 	if (sc->sc_type < WM_T_PCH_LPT) {
   13287 		sc->phy.release(sc);
   13288 		wm_set_mdio_slow_mode_hv(sc);
   13289 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13290 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13291 		sc->phy.acquire(sc);
   13292 	}
   13293 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
    13294 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: PHY ID is still invalid\n",
          			device_xname(sc->sc_dev), __func__));
   13295 		return false;
   13296 	}
   13297 out:
   13298 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13299 		/* Only unforce SMBus if ME is not active */
   13300 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13301 			/* Unforce SMBus mode in PHY */
   13302 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13303 			    CV_SMB_CTRL);
   13304 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13305 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13306 			    CV_SMB_CTRL, reg);
   13307 
   13308 			/* Unforce SMBus mode in MAC */
   13309 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13310 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13311 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13312 		}
   13313 	}
   13314 	return true;
   13315 }
   13316 
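          /*
           * Toggle the LANPHYPC pin to power-cycle the PHY, then wait for the
           * PHY configuration to complete.
           */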
   13317 static void
   13318 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13319 {
   13320 	uint32_t reg;
   13321 	int i;
   13322 
   13323 	/* Set PHY Config Counter to 50msec */
   13324 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13325 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13326 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13327 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13328 
   13329 	/* Toggle LANPHYPC */
   13330 	reg = CSR_READ(sc, WMREG_CTRL);
   13331 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13332 	reg &= ~CTRL_LANPHYPC_VALUE;
   13333 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13334 	CSR_WRITE_FLUSH(sc);
   13335 	delay(1000);
   13336 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13337 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13338 	CSR_WRITE_FLUSH(sc);
   13339 
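          	/*
          	 * Wait for the PHY to come back up: a fixed 50ms before LPT,
          	 * or poll CTRL_EXT_LPCD for up to ~100ms followed by a 30ms
          	 * settling delay on LPT and newer.
          	 */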
   13340 	if (sc->sc_type < WM_T_PCH_LPT)
   13341 		delay(50 * 1000);
   13342 	else {
   13343 		i = 20;
   13344 
   13345 		do {
   13346 			delay(5 * 1000);
   13347 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13348 		    && i--);
   13349 
   13350 		delay(30 * 1000);
   13351 	}
   13352 }
   13353 
   13354 static int
   13355 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13356 {
   13357 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13358 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13359 	uint32_t rxa;
   13360 	uint16_t scale = 0, lat_enc = 0;
   13361 	int64_t lat_ns, value;
   13362 
   13363 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13364 		device_xname(sc->sc_dev), __func__));
   13365 
   13366 	if (link) {
   13367 		pcireg_t preg;
   13368 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13369 
   13370 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13371 
   13372 		/*
   13373 		 * Determine the maximum latency tolerated by the device.
   13374 		 *
   13375 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13376 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13377 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13378 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13379 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13380 		 */
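          		/*
          		 * lat_ns below: bytes in the Rx buffer (rxa is in KB)
          		 * minus twice the MTU, converted to bits, times 1000,
          		 * divided by the link speed in Mb/s further down -
          		 * i.e. the buffer drain time in nanoseconds.
          		 */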
   13381 		lat_ns = ((int64_t)rxa * 1024 -
   13382 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13383 		if (lat_ns < 0)
   13384 			lat_ns = 0;
   13385 		else {
   13386 			uint32_t status;
   13387 			uint16_t speed;
   13388 
   13389 			status = CSR_READ(sc, WMREG_STATUS);
   13390 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13391 			case STATUS_SPEED_10:
   13392 				speed = 10;
   13393 				break;
   13394 			case STATUS_SPEED_100:
   13395 				speed = 100;
   13396 				break;
   13397 			case STATUS_SPEED_1000:
   13398 				speed = 1000;
   13399 				break;
   13400 			default:
   13401 				printf("%s: Unknown speed (status = %08x)\n",
   13402 				    device_xname(sc->sc_dev), status);
   13403 				return -1;
   13404 			}
   13405 			lat_ns /= speed;
   13406 		}
   13407 		value = lat_ns;
   13408 
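          		/*
          		 * Squeeze lat_ns into the 10-bit LTR value field; each
          		 * scale step divides by 2^5, rounding up via howmany().
          		 */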
   13409 		while (value > LTRV_VALUE) {
    13410 			scale++;
   13411 			value = howmany(value, __BIT(5));
   13412 		}
   13413 		if (scale > LTRV_SCALE_MAX) {
   13414 			printf("%s: Invalid LTR latency scale %d\n",
   13415 			    device_xname(sc->sc_dev), scale);
   13416 			return -1;
   13417 		}
   13418 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13419 
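          		/*
          		 * Clamp to the maximum latency advertised in the LTR
          		 * capability (low 16 bits: snoop, high 16 bits:
          		 * no-snoop).
          		 */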
   13420 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13421 		    WM_PCI_LTR_CAP_LPT);
   13422 		max_snoop = preg & 0xffff;
   13423 		max_nosnoop = preg >> 16;
   13424 
   13425 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13426 
   13427 		if (lat_enc > max_ltr_enc) {
   13428 			lat_enc = max_ltr_enc;
   13429 		}
   13430 	}
    13431 	/* Use the same encoded latency for snoop and no-snoop requests */
   13432 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13433 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13434 
   13435 	return 0;
   13436 }
   13437 
   13438 /*
   13439  * I210 Errata 25 and I211 Errata 10
   13440  * Slow System Clock.
   13441  */
   13442 static void
   13443 wm_pll_workaround_i210(struct wm_softc *sc)
   13444 {
   13445 	uint32_t mdicnfg, wuc;
   13446 	uint32_t reg;
   13447 	pcireg_t pcireg;
   13448 	uint32_t pmreg;
   13449 	uint16_t nvmword, tmp_nvmword;
   13450 	int phyval;
   13451 	bool wa_done = false;
   13452 	int i;
   13453 
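          	/*
          	 * Check whether the internal PHY's PLL came up configured; if
          	 * not, power-cycle the PHY through D3hot with a modified iNVM
          	 * autoload word and retry, up to WM_MAX_PLL_TRIES times.
          	 */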
   13454 	/* Save WUC and MDICNFG registers */
   13455 	wuc = CSR_READ(sc, WMREG_WUC);
   13456 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13457 
   13458 	reg = mdicnfg & ~MDICNFG_DEST;
   13459 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13460 
   13461 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13462 		nvmword = INVM_DEFAULT_AL;
   13463 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13464 
    13465 	/* Get Power Management cap offset */
    13466 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
    13467 		&pmreg, NULL) == 0) {
          		/* Restore MDICNFG before bailing out */
          		CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
    13468 		return;
          	}
   13469 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13470 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13471 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13472 
   13473 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13474 			break; /* OK */
   13475 		}
   13476 
   13477 		wa_done = true;
   13478 		/* Directly reset the internal PHY */
   13479 		reg = CSR_READ(sc, WMREG_CTRL);
   13480 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13481 
   13482 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13483 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13484 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13485 
   13486 		CSR_WRITE(sc, WMREG_WUC, 0);
   13487 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13488 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13489 
   13490 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13491 		    pmreg + PCI_PMCSR);
   13492 		pcireg |= PCI_PMCSR_STATE_D3;
   13493 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13494 		    pmreg + PCI_PMCSR, pcireg);
   13495 		delay(1000);
   13496 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13497 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13498 		    pmreg + PCI_PMCSR, pcireg);
   13499 
   13500 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13501 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13502 
   13503 		/* Restore WUC register */
   13504 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13505 	}
   13506 
   13507 	/* Restore MDICNFG setting */
   13508 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13509 	if (wa_done)
   13510 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13511 }
   13512