      1 /*	$NetBSD: if_wm.c,v 1.476 2017/02/02 10:29:10 knakahara Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Split header buffer for newer descriptors
     79  *	- EEE (Energy Efficiency Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.476 2017/02/02 10:29:10 knakahara Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
    157 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
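
/*
 * Illustrative usage note: DPRINTF() takes its printf() arguments as a
 * single parenthesized group, so a typical call site looks like
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link status changed\n",
 *	    device_xname(sc->sc_dev)));
 *
 * The extra parentheses let a full argument list pass through the
 * macro's single "y" parameter without requiring __VA_ARGS__.
 */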
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
     167  * Maximum number of interrupts used by this driver (queues + link).
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
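
/*
 * Illustrative sketch: because WM_NTXDESC(txq) is a power of two, the
 * ring index can wrap with a cheap mask instead of a modulo.  With
 * 4096 descriptors:
 *
 *	WM_NEXTTX(txq, 4094) == 4095
 *	WM_NEXTTX(txq, 4095) == 0	(4096 & 4095)
 *
 * WM_NEXTTXS() applies the same trick to the Tx job (txsoft) ring.
 */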
    197 
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
     203  * Receive descriptor list size.  We use one Rx buffer for a normal-
     204  * sized packet; a full-sized jumbo packet consumes 5 Rx buffers.
     205  * We allocate 256 receive descriptors, each with a 2k buffer
     206  * (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
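
/*
 * Worked example for the figures above (assuming a ~9k jumbo frame):
 * ceil(9018 / 2048) = 5 buffers per jumbo packet, and 256 descriptors
 * divided by 5 leaves room for about 50 in-flight jumbo packets.
 */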
    212 
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 typedef union rxdescs {
    219 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
     220 	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    221 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    222 } rxdescs_t;
    223 
    224 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    225 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    226 
    227 /*
    228  * Software state for transmit jobs.
    229  */
    230 struct wm_txsoft {
    231 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    232 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    233 	int txs_firstdesc;		/* first descriptor in packet */
    234 	int txs_lastdesc;		/* last descriptor in packet */
    235 	int txs_ndesc;			/* # of descriptors used */
    236 };
    237 
    238 /*
    239  * Software state for receive buffers.  Each descriptor gets a
    240  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    241  * more than one buffer, we chain them together.
    242  */
    243 struct wm_rxsoft {
    244 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    245 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    246 };
    247 
    248 #define WM_LINKUP_TIMEOUT	50
    249 
    250 static uint16_t swfwphysem[] = {
    251 	SWFW_PHY0_SM,
    252 	SWFW_PHY1_SM,
    253 	SWFW_PHY2_SM,
    254 	SWFW_PHY3_SM
    255 };
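
/*
 * Illustrative use (an assumption based on the layout above): on
 * multi-port devices the per-PHY semaphore mask is selected by the
 * PCI function number, e.g. swfwphysem[sc->sc_funcid].
 */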
    256 
    257 static const uint32_t wm_82580_rxpbs_table[] = {
    258 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    259 };
    260 
    261 struct wm_softc;
    262 
    263 #ifdef WM_EVENT_COUNTERS
    264 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    265 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    266 	struct evcnt qname##_ev_##evname;
    267 
    268 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
    270 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    271 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    272 		    "%s%02d%s", #qname, (qnum), #evname);		\
    273 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    274 		    (evtype), NULL, (xname),				\
    275 		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)
    277 
    278 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    279 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    280 
    281 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    282 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    283 #endif /* WM_EVENT_COUNTERS */
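
/*
 * Illustrative expansion: a structure member declared with
 * WM_Q_EVCNT_DEFINE(txq, txdw) is attached by
 *
 *	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname);
 *
 * which generates the counter name "txq00txdw" via the "%s%02d%s"
 * format, and is later bumped with WM_Q_EVCNT_INCR(txq, txdw), which
 * expands to WM_EVCNT_INCR(&(txq)->txq_ev_txdw).
 */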
    284 
    285 struct wm_txqueue {
    286 	kmutex_t *txq_lock;		/* lock for tx operations */
    287 
    288 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    289 
    290 	/* Software state for the transmit descriptors. */
    291 	int txq_num;			/* must be a power of two */
    292 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    293 
    294 	/* TX control data structures. */
    295 	int txq_ndesc;			/* must be a power of two */
    296 	size_t txq_descsize;		/* a tx descriptor size */
    297 	txdescs_t *txq_descs_u;
     298 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    299 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    300 	int txq_desc_rseg;		/* real number of control segment */
    301 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    302 #define	txq_descs	txq_descs_u->sctxu_txdescs
    303 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    304 
    305 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    306 
    307 	int txq_free;			/* number of free Tx descriptors */
    308 	int txq_next;			/* next ready Tx descriptor */
    309 
    310 	int txq_sfree;			/* number of free Tx jobs */
    311 	int txq_snext;			/* next free Tx job */
    312 	int txq_sdirty;			/* dirty Tx jobs */
    313 
    314 	/* These 4 variables are used only on the 82547. */
    315 	int txq_fifo_size;		/* Tx FIFO size */
    316 	int txq_fifo_head;		/* current head of FIFO */
    317 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    318 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    319 
    320 	/*
    321 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     322 	 * CPUs.  This queue mediates them without blocking (sketch below).
    323 	 */
    324 	pcq_t *txq_interq;
    325 
    326 	/*
     327 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     328 	 * to manage the Tx H/W queue's busy flag.
    329 	 */
    330 	int txq_flags;			/* flags for H/W queue, see below */
    331 #define	WM_TXQ_NO_SPACE	0x1
    332 
    333 	bool txq_stopping;
    334 
    335 #ifdef WM_EVENT_COUNTERS
    336 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    337 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    338 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    339 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    340 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    341 						/* XXX not used? */
    342 
    343 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
     344 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    345 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    346 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    347 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    348 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    349 
     350 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */
    351 
    352 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    353 
    354 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    355 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    356 #endif /* WM_EVENT_COUNTERS */
    357 };
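
/*
 * Illustrative sketch (not compiled): txq_interq above is a pcq(9),
 * so a CPU that does not hold txq_lock can hand off an mbuf without
 * blocking:
 *
 *	if (!pcq_put(txq->txq_interq, m))
 *		m_freem(m);		-- queue full; drop
 *
 * and the queue owner later drains it under txq_lock:
 *
 *	while ((m = pcq_get(txq->txq_interq)) != NULL)
 *		... enqueue m onto the hardware ring ...
 */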
    358 
    359 struct wm_rxqueue {
    360 	kmutex_t *rxq_lock;		/* lock for rx operations */
    361 
    362 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    363 
    364 	/* Software state for the receive descriptors. */
    365 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    366 
    367 	/* RX control data structures. */
    368 	int rxq_ndesc;			/* must be a power of two */
    369 	size_t rxq_descsize;		/* a rx descriptor size */
    370 	rxdescs_t *rxq_descs_u;
    371 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    372 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    373 	int rxq_desc_rseg;		/* real number of control segment */
    374 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    375 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    376 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    377 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    378 
    379 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    380 
    381 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    382 	int rxq_discard;
    383 	int rxq_len;
    384 	struct mbuf *rxq_head;
    385 	struct mbuf *rxq_tail;
    386 	struct mbuf **rxq_tailp;
    387 
    388 	bool rxq_stopping;
    389 
    390 #ifdef WM_EVENT_COUNTERS
    391 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    392 
    393 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    394 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    395 #endif
    396 };
    397 
    398 struct wm_queue {
    399 	int wmq_id;			/* index of transmit and receive queues */
    400 	int wmq_intr_idx;		/* index of MSI-X tables */
    401 
    402 	struct wm_txqueue wmq_txq;
    403 	struct wm_rxqueue wmq_rxq;
    404 };
    405 
    406 struct wm_phyop {
    407 	int (*acquire)(struct wm_softc *);
    408 	void (*release)(struct wm_softc *);
    409 	int reset_delay_us;
    410 };
    411 
    412 /*
    413  * Software state per device.
    414  */
    415 struct wm_softc {
    416 	device_t sc_dev;		/* generic device information */
    417 	bus_space_tag_t sc_st;		/* bus space tag */
    418 	bus_space_handle_t sc_sh;	/* bus space handle */
    419 	bus_size_t sc_ss;		/* bus space size */
    420 	bus_space_tag_t sc_iot;		/* I/O space tag */
    421 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    422 	bus_size_t sc_ios;		/* I/O space size */
    423 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    424 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    425 	bus_size_t sc_flashs;		/* flash registers space size */
    426 	off_t sc_flashreg_offset;	/*
    427 					 * offset to flash registers from
    428 					 * start of BAR
    429 					 */
    430 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    431 
    432 	struct ethercom sc_ethercom;	/* ethernet common data */
    433 	struct mii_data sc_mii;		/* MII/media information */
    434 
    435 	pci_chipset_tag_t sc_pc;
    436 	pcitag_t sc_pcitag;
    437 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    438 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    439 
    440 	uint16_t sc_pcidevid;		/* PCI device ID */
    441 	wm_chip_type sc_type;		/* MAC type */
    442 	int sc_rev;			/* MAC revision */
    443 	wm_phy_type sc_phytype;		/* PHY type */
    444 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    445 #define	WM_MEDIATYPE_UNKNOWN		0x00
    446 #define	WM_MEDIATYPE_FIBER		0x01
    447 #define	WM_MEDIATYPE_COPPER		0x02
    448 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    449 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    450 	int sc_flags;			/* flags; see below */
    451 	int sc_if_flags;		/* last if_flags */
    452 	int sc_flowflags;		/* 802.3x flow control flags */
    453 	int sc_align_tweak;
    454 
    455 	void *sc_ihs[WM_MAX_NINTR];	/*
    456 					 * interrupt cookie.
    457 					 * legacy and msi use sc_ihs[0].
    458 					 */
    459 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    460 	int sc_nintrs;			/* number of interrupts */
    461 
    462 	int sc_link_intr_idx;		/* index of MSI-X tables */
    463 
    464 	callout_t sc_tick_ch;		/* tick callout */
    465 	bool sc_core_stopping;
    466 
    467 	int sc_nvm_ver_major;
    468 	int sc_nvm_ver_minor;
    469 	int sc_nvm_ver_build;
    470 	int sc_nvm_addrbits;		/* NVM address bits */
    471 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    472 	int sc_ich8_flash_base;
    473 	int sc_ich8_flash_bank_size;
    474 	int sc_nvm_k1_enabled;
    475 
    476 	int sc_nqueues;
    477 	struct wm_queue *sc_queue;
    478 
    479 	int sc_affinity_offset;
    480 
    481 #ifdef WM_EVENT_COUNTERS
    482 	/* Event counters. */
    483 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    484 
     485 	/* WM_T_82542_2_1 only */
    486 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    487 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    488 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    489 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    490 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    491 #endif /* WM_EVENT_COUNTERS */
    492 
     493 	/* This variable is used only on the 82547. */
    494 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    495 
    496 	uint32_t sc_ctrl;		/* prototype CTRL register */
    497 #if 0
    498 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    499 #endif
    500 	uint32_t sc_icr;		/* prototype interrupt bits */
    501 	uint32_t sc_itr;		/* prototype intr throttling reg */
    502 	uint32_t sc_tctl;		/* prototype TCTL register */
    503 	uint32_t sc_rctl;		/* prototype RCTL register */
    504 	uint32_t sc_txcw;		/* prototype TXCW register */
    505 	uint32_t sc_tipg;		/* prototype TIPG register */
    506 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    507 	uint32_t sc_pba;		/* prototype PBA register */
    508 
    509 	int sc_tbi_linkup;		/* TBI link status */
    510 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    511 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    512 
    513 	int sc_mchash_type;		/* multicast filter offset */
    514 
    515 	krndsource_t rnd_source;	/* random source */
    516 
    517 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    518 
    519 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    520 	kmutex_t *sc_ich_phymtx;	/*
    521 					 * 82574/82583/ICH/PCH specific PHY
    522 					 * mutex. For 82574/82583, the mutex
    523 					 * is used for both PHY and NVM.
    524 					 */
    525 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    526 
    527 	struct wm_phyop phy;
    528 };
    529 
    530 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    531 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    532 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    533 
    534 #ifdef WM_MPSAFE
    535 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    536 #else
    537 #define CALLOUT_FLAGS	0
    538 #endif
    539 
    540 #define	WM_RXCHAIN_RESET(rxq)						\
    541 do {									\
    542 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    543 	*(rxq)->rxq_tailp = NULL;					\
    544 	(rxq)->rxq_len = 0;						\
    545 } while (/*CONSTCOND*/0)
    546 
    547 #define	WM_RXCHAIN_LINK(rxq, m)						\
    548 do {									\
    549 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    550 	(rxq)->rxq_tailp = &(m)->m_next;				\
    551 } while (/*CONSTCOND*/0)
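
/*
 * Illustrative sketch: a packet spanning three Rx buffers is
 * reassembled by linking each mbuf as it arrives:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2
 *	WM_RXCHAIN_LINK(rxq, m3);	m2->m_next == m3
 *
 * rxq_tailp always points at the m_next slot to fill next, so linking
 * is O(1) regardless of chain length.
 */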
    552 
    553 #ifdef WM_EVENT_COUNTERS
    554 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    555 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    556 
    557 #define WM_Q_EVCNT_INCR(qname, evname)			\
    558 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    559 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    560 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    561 #else /* !WM_EVENT_COUNTERS */
    562 #define	WM_EVCNT_INCR(ev)	/* nothing */
    563 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    564 
    565 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    566 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    567 #endif /* !WM_EVENT_COUNTERS */
    568 
    569 #define	CSR_READ(sc, reg)						\
    570 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    571 #define	CSR_WRITE(sc, reg, val)						\
    572 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    573 #define	CSR_WRITE_FLUSH(sc)						\
    574 	(void) CSR_READ((sc), WMREG_STATUS)
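
/*
 * Illustrative pattern: CSR_WRITE_FLUSH() turns a posted write into a
 * completed one by reading a harmless register, so a delay measured
 * after it reflects time after the write actually reached the chip:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(100);
 */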
    575 
    576 #define ICH8_FLASH_READ32(sc, reg)					\
    577 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    578 	    (reg) + sc->sc_flashreg_offset)
    579 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    580 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    581 	    (reg) + sc->sc_flashreg_offset, (data))
    582 
    583 #define ICH8_FLASH_READ16(sc, reg)					\
    584 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    585 	    (reg) + sc->sc_flashreg_offset)
    586 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    587 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    588 	    (reg) + sc->sc_flashreg_offset, (data))
    589 
    590 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    591 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    592 
    593 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    594 #define	WM_CDTXADDR_HI(txq, x)						\
    595 	(sizeof(bus_addr_t) == 8 ?					\
    596 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    597 
    598 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    599 #define	WM_CDRXADDR_HI(rxq, x)						\
    600 	(sizeof(bus_addr_t) == 8 ?					\
    601 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
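
/*
 * Worked example: with a 64-bit bus_addr_t, a control-data area at
 * DMA address 0x123456000 and 16-byte descriptors, descriptor 2 lives
 * at 0x123456020 and is handed to the chip as
 *
 *	WM_CDTXADDR_LO(txq, 2) == 0x23456020
 *	WM_CDTXADDR_HI(txq, 2) == 0x1
 *
 * On platforms with a 32-bit bus_addr_t the high half is always 0.
 */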
    602 
    603 /*
    604  * Register read/write functions.
    605  * Other than CSR_{READ|WRITE}().
    606  */
    607 #if 0
    608 static inline uint32_t wm_io_read(struct wm_softc *, int);
    609 #endif
    610 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    611 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    612 	uint32_t, uint32_t);
    613 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    614 
    615 /*
    616  * Descriptor sync/init functions.
    617  */
    618 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    619 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    620 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    621 
    622 /*
    623  * Device driver interface functions and commonly used functions.
    624  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    625  */
    626 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    627 static int	wm_match(device_t, cfdata_t, void *);
    628 static void	wm_attach(device_t, device_t, void *);
    629 static int	wm_detach(device_t, int);
    630 static bool	wm_suspend(device_t, const pmf_qual_t *);
    631 static bool	wm_resume(device_t, const pmf_qual_t *);
    632 static void	wm_watchdog(struct ifnet *);
    633 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    634 static void	wm_tick(void *);
    635 static int	wm_ifflags_cb(struct ethercom *);
    636 static int	wm_ioctl(struct ifnet *, u_long, void *);
    637 /* MAC address related */
    638 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    639 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    640 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    641 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    642 static void	wm_set_filter(struct wm_softc *);
    643 /* Reset and init related */
    644 static void	wm_set_vlan(struct wm_softc *);
    645 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    646 static void	wm_get_auto_rd_done(struct wm_softc *);
    647 static void	wm_lan_init_done(struct wm_softc *);
    648 static void	wm_get_cfg_done(struct wm_softc *);
    649 static void	wm_initialize_hardware_bits(struct wm_softc *);
    650 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    651 static void	wm_reset_phy(struct wm_softc *);
    652 static void	wm_flush_desc_rings(struct wm_softc *);
    653 static void	wm_reset(struct wm_softc *);
    654 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    655 static void	wm_rxdrain(struct wm_rxqueue *);
    656 static void	wm_rss_getkey(uint8_t *);
    657 static void	wm_init_rss(struct wm_softc *);
    658 static void	wm_adjust_qnum(struct wm_softc *, int);
    659 static int	wm_setup_legacy(struct wm_softc *);
    660 static int	wm_setup_msix(struct wm_softc *);
    661 static int	wm_init(struct ifnet *);
    662 static int	wm_init_locked(struct ifnet *);
    663 static void	wm_turnon(struct wm_softc *);
    664 static void	wm_turnoff(struct wm_softc *);
    665 static void	wm_stop(struct ifnet *, int);
    666 static void	wm_stop_locked(struct ifnet *, int);
    667 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    668 static void	wm_82547_txfifo_stall(void *);
    669 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    670 /* DMA related */
    671 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    672 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    673 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    674 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    675     struct wm_txqueue *);
    676 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    677 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    678 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    679     struct wm_rxqueue *);
    680 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    681 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    682 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    683 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    684 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    685 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    686 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    687     struct wm_txqueue *);
    688 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    689     struct wm_rxqueue *);
    690 static int	wm_alloc_txrx_queues(struct wm_softc *);
    691 static void	wm_free_txrx_queues(struct wm_softc *);
    692 static int	wm_init_txrx_queues(struct wm_softc *);
    693 /* Start */
    694 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    695     uint32_t *, uint8_t *);
    696 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    697 static void	wm_start(struct ifnet *);
    698 static void	wm_start_locked(struct ifnet *);
    699 static int	wm_transmit(struct ifnet *, struct mbuf *);
    700 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    701 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    702 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    703     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    704 static void	wm_nq_start(struct ifnet *);
    705 static void	wm_nq_start_locked(struct ifnet *);
    706 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    707 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    708 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    709 static void	wm_deferred_start(struct ifnet *);
    710 /* Interrupt */
    711 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    712 static void	wm_rxeof(struct wm_rxqueue *);
    713 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    714 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    715 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    716 static void	wm_linkintr(struct wm_softc *, uint32_t);
    717 static int	wm_intr_legacy(void *);
    718 static int	wm_txrxintr_msix(void *);
    719 static int	wm_linkintr_msix(void *);
    720 
    721 /*
    722  * Media related.
    723  * GMII, SGMII, TBI, SERDES and SFP.
    724  */
    725 /* Common */
    726 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    727 /* GMII related */
    728 static void	wm_gmii_reset(struct wm_softc *);
    729 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    730 static int	wm_get_phy_id_82575(struct wm_softc *);
    731 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    732 static int	wm_gmii_mediachange(struct ifnet *);
    733 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    734 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    735 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    736 static int	wm_gmii_i82543_readreg(device_t, int, int);
    737 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    738 static int	wm_gmii_mdic_readreg(device_t, int, int);
    739 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    740 static int	wm_gmii_i82544_readreg(device_t, int, int);
    741 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    742 static int	wm_gmii_i80003_readreg(device_t, int, int);
    743 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    744 static int	wm_gmii_bm_readreg(device_t, int, int);
    745 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    746 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    747 static int	wm_gmii_hv_readreg(device_t, int, int);
    748 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    749 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    750 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    751 static int	wm_gmii_82580_readreg(device_t, int, int);
    752 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    753 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    754 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    755 static void	wm_gmii_statchg(struct ifnet *);
    756 /*
     757  * Kumeran related (80003, ICH* and PCH*).
     758  * These functions access Kumeran-specific registers,
     759  * not MII registers.
    760  */
    761 static int	wm_kmrn_readreg(struct wm_softc *, int);
    762 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    763 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    764 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    765 /* SGMII */
    766 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    767 static int	wm_sgmii_readreg(device_t, int, int);
    768 static void	wm_sgmii_writereg(device_t, int, int, int);
    769 /* TBI related */
    770 static void	wm_tbi_mediainit(struct wm_softc *);
    771 static int	wm_tbi_mediachange(struct ifnet *);
    772 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    773 static int	wm_check_for_link(struct wm_softc *);
    774 static void	wm_tbi_tick(struct wm_softc *);
    775 /* SERDES related */
    776 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    777 static int	wm_serdes_mediachange(struct ifnet *);
    778 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    779 static void	wm_serdes_tick(struct wm_softc *);
    780 /* SFP related */
    781 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    782 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    783 
    784 /*
    785  * NVM related.
    786  * Microwire, SPI (w/wo EERD) and Flash.
    787  */
    788 /* Misc functions */
    789 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    790 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    791 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    792 /* Microwire */
    793 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    794 /* SPI */
    795 static int	wm_nvm_ready_spi(struct wm_softc *);
    796 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     797 /* Reading via EERD */
    798 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    799 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    800 /* Flash */
    801 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    802     unsigned int *);
    803 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    804 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    805 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    806 	uint32_t *);
    807 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    808 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    809 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    810 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    811 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    812 /* iNVM */
    813 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    814 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
     815 /* Locking, NVM type detection, checksum validation and reading */
    816 static int	wm_nvm_acquire(struct wm_softc *);
    817 static void	wm_nvm_release(struct wm_softc *);
    818 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    819 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    820 static int	wm_nvm_validate_checksum(struct wm_softc *);
    821 static void	wm_nvm_version_invm(struct wm_softc *);
    822 static void	wm_nvm_version(struct wm_softc *);
    823 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    824 
    825 /*
    826  * Hardware semaphores.
     827  * Very complex...
    828  */
    829 static int	wm_get_null(struct wm_softc *);
    830 static void	wm_put_null(struct wm_softc *);
    831 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    832 static void	wm_put_swsm_semaphore(struct wm_softc *);
    833 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    834 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    835 static int	wm_get_phy_82575(struct wm_softc *);
    836 static void	wm_put_phy_82575(struct wm_softc *);
    837 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    838 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    839 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    840 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    841 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    842 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    843 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    844 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    845 
    846 /*
    847  * Management mode and power management related subroutines.
    848  * BMC, AMT, suspend/resume and EEE.
    849  */
    850 #if 0
    851 static int	wm_check_mng_mode(struct wm_softc *);
    852 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    853 static int	wm_check_mng_mode_82574(struct wm_softc *);
    854 static int	wm_check_mng_mode_generic(struct wm_softc *);
    855 #endif
    856 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    857 static bool	wm_phy_resetisblocked(struct wm_softc *);
    858 static void	wm_get_hw_control(struct wm_softc *);
    859 static void	wm_release_hw_control(struct wm_softc *);
    860 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    861 static void	wm_smbustopci(struct wm_softc *);
    862 static void	wm_init_manageability(struct wm_softc *);
    863 static void	wm_release_manageability(struct wm_softc *);
    864 static void	wm_get_wakeup(struct wm_softc *);
    865 static void	wm_ulp_disable(struct wm_softc *);
    866 static void	wm_enable_phy_wakeup(struct wm_softc *);
    867 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    868 static void	wm_enable_wakeup(struct wm_softc *);
    869 /* LPLU (Low Power Link Up) */
    870 static void	wm_lplu_d0_disable(struct wm_softc *);
    871 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    872 /* EEE */
    873 static void	wm_set_eee_i350(struct wm_softc *);
    874 
    875 /*
    876  * Workarounds (mainly PHY related).
    877  * Basically, PHY's workarounds are in the PHY drivers.
    878  */
    879 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    880 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    881 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    882 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    883 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    884 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    885 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    886 static void	wm_reset_init_script_82575(struct wm_softc *);
    887 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    888 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    889 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    890 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    891 static void	wm_pll_workaround_i210(struct wm_softc *);
    892 
    893 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    894     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    895 
    896 /*
    897  * Devices supported by this driver.
    898  */
    899 static const struct wm_product {
    900 	pci_vendor_id_t		wmp_vendor;
    901 	pci_product_id_t	wmp_product;
    902 	const char		*wmp_name;
    903 	wm_chip_type		wmp_type;
    904 	uint32_t		wmp_flags;
    905 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    906 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    907 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    908 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    909 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    910 } wm_products[] = {
    911 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    912 	  "Intel i82542 1000BASE-X Ethernet",
    913 	  WM_T_82542_2_1,	WMP_F_FIBER },
    914 
    915 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    916 	  "Intel i82543GC 1000BASE-X Ethernet",
    917 	  WM_T_82543,		WMP_F_FIBER },
    918 
    919 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    920 	  "Intel i82543GC 1000BASE-T Ethernet",
    921 	  WM_T_82543,		WMP_F_COPPER },
    922 
    923 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    924 	  "Intel i82544EI 1000BASE-T Ethernet",
    925 	  WM_T_82544,		WMP_F_COPPER },
    926 
    927 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    928 	  "Intel i82544EI 1000BASE-X Ethernet",
    929 	  WM_T_82544,		WMP_F_FIBER },
    930 
    931 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    932 	  "Intel i82544GC 1000BASE-T Ethernet",
    933 	  WM_T_82544,		WMP_F_COPPER },
    934 
    935 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    936 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    937 	  WM_T_82544,		WMP_F_COPPER },
    938 
    939 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    940 	  "Intel i82540EM 1000BASE-T Ethernet",
    941 	  WM_T_82540,		WMP_F_COPPER },
    942 
    943 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    944 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    945 	  WM_T_82540,		WMP_F_COPPER },
    946 
    947 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    948 	  "Intel i82540EP 1000BASE-T Ethernet",
    949 	  WM_T_82540,		WMP_F_COPPER },
    950 
    951 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    952 	  "Intel i82540EP 1000BASE-T Ethernet",
    953 	  WM_T_82540,		WMP_F_COPPER },
    954 
    955 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    956 	  "Intel i82540EP 1000BASE-T Ethernet",
    957 	  WM_T_82540,		WMP_F_COPPER },
    958 
    959 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    960 	  "Intel i82545EM 1000BASE-T Ethernet",
    961 	  WM_T_82545,		WMP_F_COPPER },
    962 
    963 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    964 	  "Intel i82545GM 1000BASE-T Ethernet",
    965 	  WM_T_82545_3,		WMP_F_COPPER },
    966 
    967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    968 	  "Intel i82545GM 1000BASE-X Ethernet",
    969 	  WM_T_82545_3,		WMP_F_FIBER },
    970 
    971 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    972 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    973 	  WM_T_82545_3,		WMP_F_SERDES },
    974 
    975 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    976 	  "Intel i82546EB 1000BASE-T Ethernet",
    977 	  WM_T_82546,		WMP_F_COPPER },
    978 
    979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    980 	  "Intel i82546EB 1000BASE-T Ethernet",
    981 	  WM_T_82546,		WMP_F_COPPER },
    982 
    983 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    984 	  "Intel i82545EM 1000BASE-X Ethernet",
    985 	  WM_T_82545,		WMP_F_FIBER },
    986 
    987 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    988 	  "Intel i82546EB 1000BASE-X Ethernet",
    989 	  WM_T_82546,		WMP_F_FIBER },
    990 
    991 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    992 	  "Intel i82546GB 1000BASE-T Ethernet",
    993 	  WM_T_82546_3,		WMP_F_COPPER },
    994 
    995 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    996 	  "Intel i82546GB 1000BASE-X Ethernet",
    997 	  WM_T_82546_3,		WMP_F_FIBER },
    998 
    999 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1000 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1001 	  WM_T_82546_3,		WMP_F_SERDES },
   1002 
   1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1004 	  "i82546GB quad-port Gigabit Ethernet",
   1005 	  WM_T_82546_3,		WMP_F_COPPER },
   1006 
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1008 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1009 	  WM_T_82546_3,		WMP_F_COPPER },
   1010 
   1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1012 	  "Intel PRO/1000MT (82546GB)",
   1013 	  WM_T_82546_3,		WMP_F_COPPER },
   1014 
   1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1016 	  "Intel i82541EI 1000BASE-T Ethernet",
   1017 	  WM_T_82541,		WMP_F_COPPER },
   1018 
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1020 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1021 	  WM_T_82541,		WMP_F_COPPER },
   1022 
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1024 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1025 	  WM_T_82541,		WMP_F_COPPER },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1028 	  "Intel i82541ER 1000BASE-T Ethernet",
   1029 	  WM_T_82541_2,		WMP_F_COPPER },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1032 	  "Intel i82541GI 1000BASE-T Ethernet",
   1033 	  WM_T_82541_2,		WMP_F_COPPER },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1036 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1037 	  WM_T_82541_2,		WMP_F_COPPER },
   1038 
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1040 	  "Intel i82541PI 1000BASE-T Ethernet",
   1041 	  WM_T_82541_2,		WMP_F_COPPER },
   1042 
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1044 	  "Intel i82547EI 1000BASE-T Ethernet",
   1045 	  WM_T_82547,		WMP_F_COPPER },
   1046 
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1048 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1049 	  WM_T_82547,		WMP_F_COPPER },
   1050 
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1052 	  "Intel i82547GI 1000BASE-T Ethernet",
   1053 	  WM_T_82547_2,		WMP_F_COPPER },
   1054 
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1056 	  "Intel PRO/1000 PT (82571EB)",
   1057 	  WM_T_82571,		WMP_F_COPPER },
   1058 
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1060 	  "Intel PRO/1000 PF (82571EB)",
   1061 	  WM_T_82571,		WMP_F_FIBER },
   1062 
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1064 	  "Intel PRO/1000 PB (82571EB)",
   1065 	  WM_T_82571,		WMP_F_SERDES },
   1066 
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1068 	  "Intel PRO/1000 QT (82571EB)",
   1069 	  WM_T_82571,		WMP_F_COPPER },
   1070 
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1072 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1073 	  WM_T_82571,		WMP_F_COPPER, },
   1074 
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1076 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1077 	  WM_T_82571,		WMP_F_COPPER, },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1080 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1081 	  WM_T_82571,		WMP_F_SERDES, },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1084 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1085 	  WM_T_82571,		WMP_F_SERDES, },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1088 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1089 	  WM_T_82571,		WMP_F_FIBER, },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1092 	  "Intel i82572EI 1000baseT Ethernet",
   1093 	  WM_T_82572,		WMP_F_COPPER },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1096 	  "Intel i82572EI 1000baseX Ethernet",
   1097 	  WM_T_82572,		WMP_F_FIBER },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1100 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1101 	  WM_T_82572,		WMP_F_SERDES },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1104 	  "Intel i82572EI 1000baseT Ethernet",
   1105 	  WM_T_82572,		WMP_F_COPPER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1108 	  "Intel i82573E",
   1109 	  WM_T_82573,		WMP_F_COPPER },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1112 	  "Intel i82573E IAMT",
   1113 	  WM_T_82573,		WMP_F_COPPER },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1116 	  "Intel i82573L Gigabit Ethernet",
   1117 	  WM_T_82573,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1120 	  "Intel i82574L",
   1121 	  WM_T_82574,		WMP_F_COPPER },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1124 	  "Intel i82574L",
   1125 	  WM_T_82574,		WMP_F_COPPER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1128 	  "Intel i82583V",
   1129 	  WM_T_82583,		WMP_F_COPPER },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1132 	  "i80003 dual 1000baseT Ethernet",
   1133 	  WM_T_80003,		WMP_F_COPPER },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1136 	  "i80003 dual 1000baseX Ethernet",
   1137 	  WM_T_80003,		WMP_F_COPPER },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1140 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1141 	  WM_T_80003,		WMP_F_SERDES },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1144 	  "Intel i80003 1000baseT Ethernet",
   1145 	  WM_T_80003,		WMP_F_COPPER },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1148 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1149 	  WM_T_80003,		WMP_F_SERDES },
   1150 
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1152 	  "Intel i82801H (M_AMT) LAN Controller",
   1153 	  WM_T_ICH8,		WMP_F_COPPER },
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1155 	  "Intel i82801H (AMT) LAN Controller",
   1156 	  WM_T_ICH8,		WMP_F_COPPER },
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1158 	  "Intel i82801H LAN Controller",
   1159 	  WM_T_ICH8,		WMP_F_COPPER },
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1161 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1162 	  WM_T_ICH8,		WMP_F_COPPER },
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1164 	  "Intel i82801H (M) LAN Controller",
   1165 	  WM_T_ICH8,		WMP_F_COPPER },
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1167 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1168 	  WM_T_ICH8,		WMP_F_COPPER },
   1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1170 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1171 	  WM_T_ICH8,		WMP_F_COPPER },
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1173 	  "82567V-3 LAN Controller",
   1174 	  WM_T_ICH8,		WMP_F_COPPER },
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1176 	  "82801I (AMT) LAN Controller",
   1177 	  WM_T_ICH9,		WMP_F_COPPER },
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1179 	  "82801I 10/100 LAN Controller",
   1180 	  WM_T_ICH9,		WMP_F_COPPER },
   1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1182 	  "82801I (G) 10/100 LAN Controller",
   1183 	  WM_T_ICH9,		WMP_F_COPPER },
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1185 	  "82801I (GT) 10/100 LAN Controller",
   1186 	  WM_T_ICH9,		WMP_F_COPPER },
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1188 	  "82801I (C) LAN Controller",
   1189 	  WM_T_ICH9,		WMP_F_COPPER },
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1191 	  "82801I mobile LAN Controller",
   1192 	  WM_T_ICH9,		WMP_F_COPPER },
   1193 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1194 	  "82801I mobile (V) LAN Controller",
   1195 	  WM_T_ICH9,		WMP_F_COPPER },
   1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1197 	  "82801I mobile (AMT) LAN Controller",
   1198 	  WM_T_ICH9,		WMP_F_COPPER },
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1200 	  "82567LM-4 LAN Controller",
   1201 	  WM_T_ICH9,		WMP_F_COPPER },
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1203 	  "82567LM-2 LAN Controller",
   1204 	  WM_T_ICH10,		WMP_F_COPPER },
   1205 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1206 	  "82567LF-2 LAN Controller",
   1207 	  WM_T_ICH10,		WMP_F_COPPER },
   1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1209 	  "82567LM-3 LAN Controller",
   1210 	  WM_T_ICH10,		WMP_F_COPPER },
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1212 	  "82567LF-3 LAN Controller",
   1213 	  WM_T_ICH10,		WMP_F_COPPER },
   1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1215 	  "82567V-2 LAN Controller",
   1216 	  WM_T_ICH10,		WMP_F_COPPER },
   1217 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1218 	  "82567V-3? LAN Controller",
   1219 	  WM_T_ICH10,		WMP_F_COPPER },
   1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1221 	  "HANKSVILLE LAN Controller",
   1222 	  WM_T_ICH10,		WMP_F_COPPER },
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1224 	  "PCH LAN (82577LM) Controller",
   1225 	  WM_T_PCH,		WMP_F_COPPER },
   1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1227 	  "PCH LAN (82577LC) Controller",
   1228 	  WM_T_PCH,		WMP_F_COPPER },
   1229 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1230 	  "PCH LAN (82578DM) Controller",
   1231 	  WM_T_PCH,		WMP_F_COPPER },
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1233 	  "PCH LAN (82578DC) Controller",
   1234 	  WM_T_PCH,		WMP_F_COPPER },
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1236 	  "PCH2 LAN (82579LM) Controller",
   1237 	  WM_T_PCH2,		WMP_F_COPPER },
   1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1239 	  "PCH2 LAN (82579V) Controller",
   1240 	  WM_T_PCH2,		WMP_F_COPPER },
   1241 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1242 	  "82575EB dual-1000baseT Ethernet",
   1243 	  WM_T_82575,		WMP_F_COPPER },
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1245 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1246 	  WM_T_82575,		WMP_F_SERDES },
   1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1248 	  "82575GB quad-1000baseT Ethernet",
   1249 	  WM_T_82575,		WMP_F_COPPER },
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1251 	  "82575GB quad-1000baseT Ethernet (PM)",
   1252 	  WM_T_82575,		WMP_F_COPPER },
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1254 	  "82576 1000BaseT Ethernet",
   1255 	  WM_T_82576,		WMP_F_COPPER },
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1257 	  "82576 1000BaseX Ethernet",
   1258 	  WM_T_82576,		WMP_F_FIBER },
   1259 
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1261 	  "82576 gigabit Ethernet (SERDES)",
   1262 	  WM_T_82576,		WMP_F_SERDES },
   1263 
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1265 	  "82576 quad-1000BaseT Ethernet",
   1266 	  WM_T_82576,		WMP_F_COPPER },
   1267 
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1269 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1270 	  WM_T_82576,		WMP_F_COPPER },
   1271 
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1273 	  "82576 gigabit Ethernet",
   1274 	  WM_T_82576,		WMP_F_COPPER },
   1275 
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1277 	  "82576 gigabit Ethernet (SERDES)",
   1278 	  WM_T_82576,		WMP_F_SERDES },
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1280 	  "82576 quad-gigabit Ethernet (SERDES)",
   1281 	  WM_T_82576,		WMP_F_SERDES },
   1282 
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1284 	  "82580 1000BaseT Ethernet",
   1285 	  WM_T_82580,		WMP_F_COPPER },
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1287 	  "82580 1000BaseX Ethernet",
   1288 	  WM_T_82580,		WMP_F_FIBER },
   1289 
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1291 	  "82580 1000BaseT Ethernet (SERDES)",
   1292 	  WM_T_82580,		WMP_F_SERDES },
   1293 
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1295 	  "82580 gigabit Ethernet (SGMII)",
   1296 	  WM_T_82580,		WMP_F_COPPER },
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1298 	  "82580 dual-1000BaseT Ethernet",
   1299 	  WM_T_82580,		WMP_F_COPPER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1302 	  "82580 quad-1000BaseX Ethernet",
   1303 	  WM_T_82580,		WMP_F_FIBER },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1306 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1307 	  WM_T_82580,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1310 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1311 	  WM_T_82580,		WMP_F_SERDES },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1314 	  "DH89XXCC 1000BASE-KX Ethernet",
   1315 	  WM_T_82580,		WMP_F_SERDES },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1318 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1319 	  WM_T_82580,		WMP_F_SERDES },
   1320 
   1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1322 	  "I350 Gigabit Network Connection",
   1323 	  WM_T_I350,		WMP_F_COPPER },
   1324 
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1326 	  "I350 Gigabit Fiber Network Connection",
   1327 	  WM_T_I350,		WMP_F_FIBER },
   1328 
   1329 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1330 	  "I350 Gigabit Backplane Connection",
   1331 	  WM_T_I350,		WMP_F_SERDES },
   1332 
   1333 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1334 	  "I350 Quad Port Gigabit Ethernet",
   1335 	  WM_T_I350,		WMP_F_SERDES },
   1336 
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1338 	  "I350 Gigabit Connection",
   1339 	  WM_T_I350,		WMP_F_COPPER },
   1340 
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1342 	  "I354 Gigabit Ethernet (KX)",
   1343 	  WM_T_I354,		WMP_F_SERDES },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1346 	  "I354 Gigabit Ethernet (SGMII)",
   1347 	  WM_T_I354,		WMP_F_COPPER },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1350 	  "I354 Gigabit Ethernet (2.5G)",
   1351 	  WM_T_I354,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1354 	  "I210-T1 Ethernet Server Adapter",
   1355 	  WM_T_I210,		WMP_F_COPPER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1358 	  "I210 Ethernet (Copper OEM)",
   1359 	  WM_T_I210,		WMP_F_COPPER },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1362 	  "I210 Ethernet (Copper IT)",
   1363 	  WM_T_I210,		WMP_F_COPPER },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1366 	  "I210 Ethernet (FLASH less)",
   1367 	  WM_T_I210,		WMP_F_COPPER },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1370 	  "I210 Gigabit Ethernet (Fiber)",
   1371 	  WM_T_I210,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1374 	  "I210 Gigabit Ethernet (SERDES)",
   1375 	  WM_T_I210,		WMP_F_SERDES },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1378 	  "I210 Gigabit Ethernet (FLASH less)",
   1379 	  WM_T_I210,		WMP_F_SERDES },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1382 	  "I210 Gigabit Ethernet (SGMII)",
   1383 	  WM_T_I210,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1386 	  "I211 Ethernet (COPPER)",
   1387 	  WM_T_I211,		WMP_F_COPPER },
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1389 	  "I217 V Ethernet Connection",
   1390 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1392 	  "I217 LM Ethernet Connection",
   1393 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1395 	  "I218 V Ethernet Connection",
   1396 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1398 	  "I218 V Ethernet Connection",
   1399 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1401 	  "I218 V Ethernet Connection",
   1402 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1404 	  "I218 LM Ethernet Connection",
   1405 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1407 	  "I218 LM Ethernet Connection",
   1408 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1410 	  "I218 LM Ethernet Connection",
   1411 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1412 #if 0
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1414 	  "I219 V Ethernet Connection",
   1415 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1417 	  "I219 V Ethernet Connection",
   1418 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1419 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1420 	  "I219 V Ethernet Connection",
   1421 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1423 	  "I219 V Ethernet Connection",
   1424 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1426 	  "I219 LM Ethernet Connection",
   1427 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1429 	  "I219 LM Ethernet Connection",
   1430 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1431 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1432 	  "I219 LM Ethernet Connection",
   1433 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1435 	  "I219 LM Ethernet Connection",
   1436 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1438 	  "I219 LM Ethernet Connection",
   1439 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1440 #endif
   1441 	{ 0,			0,
   1442 	  NULL,
   1443 	  0,			0 },
   1444 };
   1445 
   1446 /*
   1447  * Register read/write functions.
   1448  * Other than CSR_{READ|WRITE}().
   1449  */
   1450 
   1451 #if 0 /* Not currently used */
   1452 static inline uint32_t
   1453 wm_io_read(struct wm_softc *sc, int reg)
   1454 {
   1455 
   1456 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1457 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1458 }
   1459 #endif
   1460 
   1461 static inline void
   1462 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1463 {
   1464 
   1465 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1466 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1467 }
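
/*
 * Illustrative note (added; not in the original source): the I/O BAR
 * exposes an indirect address/data register pair, so every access is
 * two bus_space operations.  For example,
 *
 *	wm_io_write(sc, WMREG_CTRL, ctrl);
 *
 * writes the register offset (WMREG_CTRL) at BAR offset 0 and the data
 * at BAR offset 4; wm_io_read() above does the same with a final read.
 */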
   1468 
   1469 static inline void
   1470 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1471     uint32_t data)
   1472 {
   1473 	uint32_t regval;
   1474 	int i;
   1475 
   1476 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1477 
   1478 	CSR_WRITE(sc, reg, regval);
   1479 
   1480 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1481 		delay(5);
   1482 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1483 			break;
   1484 	}
   1485 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1486 		aprint_error("%s: WARNING:"
   1487 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1488 		    device_xname(sc->sc_dev), reg);
   1489 	}
   1490 }
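
/*
 * Note (added for clarity): the loop above polls SCTL_CTL_READY in 5us
 * steps, so the worst case is roughly 5us * SCTL_CTL_POLL_TIMEOUT before
 * the warning is printed; the write itself is not retried on timeout.
 */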
   1491 
   1492 static inline void
   1493 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1494 {
   1495 	wa->wa_low = htole32(v & 0xffffffffU);
   1496 	if (sizeof(bus_addr_t) == 8)
   1497 		wa->wa_high = htole32((uint64_t) v >> 32);
   1498 	else
   1499 		wa->wa_high = 0;
   1500 }
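
/*
 * Worked example (added for clarity): with a 64-bit bus_addr_t,
 * v = 0x123456789 stores wa_low = htole32(0x23456789) and
 * wa_high = htole32(0x1); with a 32-bit bus_addr_t, wa_high is always 0.
 */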
   1501 
   1502 /*
   1503  * Descriptor sync/init functions.
   1504  */
   1505 static inline void
   1506 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1507 {
   1508 	struct wm_softc *sc = txq->txq_sc;
   1509 
   1510 	/* If it will wrap around, sync to the end of the ring. */
   1511 	if ((start + num) > WM_NTXDESC(txq)) {
   1512 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1513 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1514 		    (WM_NTXDESC(txq) - start), ops);
   1515 		num -= (WM_NTXDESC(txq) - start);
   1516 		start = 0;
   1517 	}
   1518 
   1519 	/* Now sync whatever is left. */
   1520 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1521 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1522 }
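
/*
 * Worked example (added for clarity): with WM_NTXDESC(txq) == 256,
 * wm_cdtxsync(txq, 250, 10, ops) first syncs descriptors 250..255
 * (6 entries), then wraps and syncs descriptors 0..3 (4 entries).
 */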
   1523 
   1524 static inline void
   1525 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1526 {
   1527 	struct wm_softc *sc = rxq->rxq_sc;
   1528 
   1529 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1530 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1531 }
   1532 
   1533 static inline void
   1534 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1535 {
   1536 	struct wm_softc *sc = rxq->rxq_sc;
   1537 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1538 	struct mbuf *m = rxs->rxs_mbuf;
   1539 
   1540 	/*
   1541 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1542 	 * so that the payload after the Ethernet header is aligned
   1543 	 * to a 4-byte boundary.
    1544 	 *
   1545 	 * XXX BRAINDAMAGE ALERT!
   1546 	 * The stupid chip uses the same size for every buffer, which
   1547 	 * is set in the Receive Control register.  We are using the 2K
   1548 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1549 	 * reason, we can't "scoot" packets longer than the standard
   1550 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1551 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1552 	 * the upper layer copy the headers.
   1553 	 */
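	/*
	 * (Arithmetic, added for clarity: the 14-byte Ethernet header
	 * plus the 2-byte tweak is 16 bytes, so the IP header that
	 * follows starts on a 4-byte boundary.)
	 */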
   1554 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1555 
   1556 	if (sc->sc_type == WM_T_82574) {
   1557 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1558 		rxd->erx_data.erxd_addr =
   1559 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1560 		rxd->erx_data.erxd_dd = 0;
   1561 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1562 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1563 
   1564 		rxd->nqrx_data.nrxd_paddr =
   1565 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1566 		/* Currently, split header is not supported. */
   1567 		rxd->nqrx_data.nrxd_haddr = 0;
   1568 	} else {
   1569 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1570 
   1571 		wm_set_dma_addr(&rxd->wrx_addr,
   1572 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1573 		rxd->wrx_len = 0;
   1574 		rxd->wrx_cksum = 0;
   1575 		rxd->wrx_status = 0;
   1576 		rxd->wrx_errors = 0;
   1577 		rxd->wrx_special = 0;
   1578 	}
   1579 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1580 
   1581 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
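	/* Writing RDT hands this descriptor back to the hardware. */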
   1582 }
   1583 
   1584 /*
   1585  * Device driver interface functions and commonly used functions.
   1586  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1587  */
   1588 
    1589 /* Look up the device in the supported-products table */
   1590 static const struct wm_product *
   1591 wm_lookup(const struct pci_attach_args *pa)
   1592 {
   1593 	const struct wm_product *wmp;
   1594 
   1595 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1596 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1597 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1598 			return wmp;
   1599 	}
   1600 	return NULL;
   1601 }
   1602 
   1603 /* The match function (ca_match) */
   1604 static int
   1605 wm_match(device_t parent, cfdata_t cf, void *aux)
   1606 {
   1607 	struct pci_attach_args *pa = aux;
   1608 
   1609 	if (wm_lookup(pa) != NULL)
   1610 		return 1;
   1611 
   1612 	return 0;
   1613 }
   1614 
   1615 /* The attach function (ca_attach) */
   1616 static void
   1617 wm_attach(device_t parent, device_t self, void *aux)
   1618 {
   1619 	struct wm_softc *sc = device_private(self);
   1620 	struct pci_attach_args *pa = aux;
   1621 	prop_dictionary_t dict;
   1622 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1623 	pci_chipset_tag_t pc = pa->pa_pc;
   1624 	int counts[PCI_INTR_TYPE_SIZE];
   1625 	pci_intr_type_t max_type;
   1626 	const char *eetype, *xname;
   1627 	bus_space_tag_t memt;
   1628 	bus_space_handle_t memh;
   1629 	bus_size_t memsize;
   1630 	int memh_valid;
   1631 	int i, error;
   1632 	const struct wm_product *wmp;
   1633 	prop_data_t ea;
   1634 	prop_number_t pn;
   1635 	uint8_t enaddr[ETHER_ADDR_LEN];
   1636 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1637 	pcireg_t preg, memtype;
   1638 	uint16_t eeprom_data, apme_mask;
   1639 	bool force_clear_smbi;
   1640 	uint32_t link_mode;
   1641 	uint32_t reg;
   1642 	void (*deferred_start_func)(struct ifnet *) = NULL;
   1643 
   1644 	sc->sc_dev = self;
   1645 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1646 	sc->sc_core_stopping = false;
   1647 
   1648 	wmp = wm_lookup(pa);
   1649 #ifdef DIAGNOSTIC
   1650 	if (wmp == NULL) {
   1651 		printf("\n");
   1652 		panic("wm_attach: impossible");
   1653 	}
   1654 #endif
   1655 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1656 
   1657 	sc->sc_pc = pa->pa_pc;
   1658 	sc->sc_pcitag = pa->pa_tag;
   1659 
   1660 	if (pci_dma64_available(pa))
   1661 		sc->sc_dmat = pa->pa_dmat64;
   1662 	else
   1663 		sc->sc_dmat = pa->pa_dmat;
   1664 
   1665 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1666 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1667 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1668 
   1669 	sc->sc_type = wmp->wmp_type;
   1670 
   1671 	/* Set default function pointers */
   1672 	sc->phy.acquire = wm_get_null;
   1673 	sc->phy.release = wm_put_null;
   1674 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1675 
   1676 	if (sc->sc_type < WM_T_82543) {
   1677 		if (sc->sc_rev < 2) {
   1678 			aprint_error_dev(sc->sc_dev,
   1679 			    "i82542 must be at least rev. 2\n");
   1680 			return;
   1681 		}
   1682 		if (sc->sc_rev < 3)
   1683 			sc->sc_type = WM_T_82542_2_0;
   1684 	}
   1685 
   1686 	/*
   1687 	 * Disable MSI for Errata:
   1688 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1689 	 *
   1690 	 *  82544: Errata 25
   1691 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1692 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1693 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1694 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1695 	 *
   1696 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1697 	 *
   1698 	 *  82571 & 82572: Errata 63
   1699 	 */
   1700 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1701 	    || (sc->sc_type == WM_T_82572))
   1702 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1703 
   1704 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1705 	    || (sc->sc_type == WM_T_82580)
   1706 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1707 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1708 		sc->sc_flags |= WM_F_NEWQUEUE;
   1709 
   1710 	/* Set device properties (mactype) */
   1711 	dict = device_properties(sc->sc_dev);
   1712 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1713 
   1714 	/*
    1715 	 * Map the device.  All devices support memory-mapped access,
   1716 	 * and it is really required for normal operation.
   1717 	 */
   1718 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1719 	switch (memtype) {
   1720 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1721 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1722 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1723 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1724 		break;
   1725 	default:
   1726 		memh_valid = 0;
   1727 		break;
   1728 	}
   1729 
   1730 	if (memh_valid) {
   1731 		sc->sc_st = memt;
   1732 		sc->sc_sh = memh;
   1733 		sc->sc_ss = memsize;
   1734 	} else {
   1735 		aprint_error_dev(sc->sc_dev,
   1736 		    "unable to map device registers\n");
   1737 		return;
   1738 	}
   1739 
   1740 	/*
   1741 	 * In addition, i82544 and later support I/O mapped indirect
   1742 	 * register access.  It is not desirable (nor supported in
   1743 	 * this driver) to use it for normal operation, though it is
   1744 	 * required to work around bugs in some chip versions.
   1745 	 */
   1746 	if (sc->sc_type >= WM_T_82544) {
   1747 		/* First we have to find the I/O BAR. */
   1748 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1749 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1750 			if (memtype == PCI_MAPREG_TYPE_IO)
   1751 				break;
   1752 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1753 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1754 				i += 4;	/* skip high bits, too */
   1755 		}
   1756 		if (i < PCI_MAPREG_END) {
   1757 			/*
    1758 			 * We found PCI_MAPREG_TYPE_IO. Note that the
    1759 			 * 82580 (and newer?) chips have no
    1760 			 * PCI_MAPREG_TYPE_IO. That is not a problem,
    1761 			 * because those newer chips don't have this bug.
   1762 			 *
    1763 			 * The i8254x apparently doesn't respond when the
    1764 			 * I/O BAR is 0, which looks as if it has not
    1765 			 * been configured.
   1766 			 */
   1767 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1768 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1769 				aprint_error_dev(sc->sc_dev,
   1770 				    "WARNING: I/O BAR at zero.\n");
   1771 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1772 					0, &sc->sc_iot, &sc->sc_ioh,
   1773 					NULL, &sc->sc_ios) == 0) {
   1774 				sc->sc_flags |= WM_F_IOH_VALID;
   1775 			} else {
   1776 				aprint_error_dev(sc->sc_dev,
   1777 				    "WARNING: unable to map I/O space\n");
   1778 			}
   1779 		}
   1780 
   1781 	}
   1782 
   1783 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1784 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1785 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1786 	if (sc->sc_type < WM_T_82542_2_1)
   1787 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1788 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1789 
   1790 	/* power up chip */
   1791 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1792 	    NULL)) && error != EOPNOTSUPP) {
   1793 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1794 		return;
   1795 	}
   1796 
   1797 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1798 
   1799 	/* Allocation settings */
   1800 	max_type = PCI_INTR_TYPE_MSIX;
   1801 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1802 	counts[PCI_INTR_TYPE_MSI] = 1;
   1803 	counts[PCI_INTR_TYPE_INTX] = 1;
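
	/*
	 * Note (added for clarity): allocation falls back MSI-X -> MSI ->
	 * INTx.  Each failed wm_setup_*() below releases its vectors and
	 * retries with the next weaker max_type.  The MSI-X count is one
	 * vector per queue plus one, which appears to be reserved for
	 * link/admin events.
	 */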
   1804 
   1805 alloc_retry:
   1806 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1807 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1808 		return;
   1809 	}
   1810 
   1811 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1812 		error = wm_setup_msix(sc);
   1813 		if (error) {
   1814 			pci_intr_release(pc, sc->sc_intrs,
   1815 			    counts[PCI_INTR_TYPE_MSIX]);
   1816 
   1817 			/* Setup for MSI: Disable MSI-X */
   1818 			max_type = PCI_INTR_TYPE_MSI;
   1819 			counts[PCI_INTR_TYPE_MSI] = 1;
   1820 			counts[PCI_INTR_TYPE_INTX] = 1;
   1821 			goto alloc_retry;
   1822 		}
    1823 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1824 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1825 		error = wm_setup_legacy(sc);
   1826 		if (error) {
   1827 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1828 			    counts[PCI_INTR_TYPE_MSI]);
   1829 
   1830 			/* The next try is for INTx: Disable MSI */
   1831 			max_type = PCI_INTR_TYPE_INTX;
   1832 			counts[PCI_INTR_TYPE_INTX] = 1;
   1833 			goto alloc_retry;
   1834 		}
   1835 	} else {
   1836 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1837 		error = wm_setup_legacy(sc);
   1838 		if (error) {
   1839 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1840 			    counts[PCI_INTR_TYPE_INTX]);
   1841 			return;
   1842 		}
   1843 	}
   1844 
   1845 	/*
   1846 	 * Check the function ID (unit number of the chip).
   1847 	 */
   1848 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1849 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1850 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1851 	    || (sc->sc_type == WM_T_82580)
   1852 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1853 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1854 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1855 	else
   1856 		sc->sc_funcid = 0;
   1857 
   1858 	/*
   1859 	 * Determine a few things about the bus we're connected to.
   1860 	 */
   1861 	if (sc->sc_type < WM_T_82543) {
   1862 		/* We don't really know the bus characteristics here. */
   1863 		sc->sc_bus_speed = 33;
   1864 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1865 		/*
    1866 		 * CSA (Communication Streaming Architecture) is about as fast
    1867 		 * as a 32-bit 66MHz PCI bus.
   1868 		 */
   1869 		sc->sc_flags |= WM_F_CSA;
   1870 		sc->sc_bus_speed = 66;
   1871 		aprint_verbose_dev(sc->sc_dev,
   1872 		    "Communication Streaming Architecture\n");
   1873 		if (sc->sc_type == WM_T_82547) {
   1874 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1875 			callout_setfunc(&sc->sc_txfifo_ch,
   1876 					wm_82547_txfifo_stall, sc);
   1877 			aprint_verbose_dev(sc->sc_dev,
   1878 			    "using 82547 Tx FIFO stall work-around\n");
   1879 		}
   1880 	} else if (sc->sc_type >= WM_T_82571) {
   1881 		sc->sc_flags |= WM_F_PCIE;
   1882 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1883 		    && (sc->sc_type != WM_T_ICH10)
   1884 		    && (sc->sc_type != WM_T_PCH)
   1885 		    && (sc->sc_type != WM_T_PCH2)
   1886 		    && (sc->sc_type != WM_T_PCH_LPT)
   1887 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1888 			/* ICH* and PCH* have no PCIe capability registers */
   1889 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1890 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1891 				NULL) == 0)
   1892 				aprint_error_dev(sc->sc_dev,
   1893 				    "unable to find PCIe capability\n");
   1894 		}
   1895 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1896 	} else {
   1897 		reg = CSR_READ(sc, WMREG_STATUS);
   1898 		if (reg & STATUS_BUS64)
   1899 			sc->sc_flags |= WM_F_BUS64;
   1900 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1901 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1902 
   1903 			sc->sc_flags |= WM_F_PCIX;
   1904 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1905 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1906 				aprint_error_dev(sc->sc_dev,
   1907 				    "unable to find PCIX capability\n");
   1908 			else if (sc->sc_type != WM_T_82545_3 &&
   1909 				 sc->sc_type != WM_T_82546_3) {
   1910 				/*
   1911 				 * Work around a problem caused by the BIOS
   1912 				 * setting the max memory read byte count
   1913 				 * incorrectly.
   1914 				 */
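				/*
				 * (Added note: the field encodes
				 * 512 << n bytes, e.g. bytecnt == 3
				 * requests 4096-byte reads, which is
				 * clamped to maxb below.)
				 */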
   1915 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1916 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1917 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1918 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1919 
   1920 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1921 				    PCIX_CMD_BYTECNT_SHIFT;
   1922 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1923 				    PCIX_STATUS_MAXB_SHIFT;
   1924 				if (bytecnt > maxb) {
   1925 					aprint_verbose_dev(sc->sc_dev,
   1926 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1927 					    512 << bytecnt, 512 << maxb);
   1928 					pcix_cmd = (pcix_cmd &
   1929 					    ~PCIX_CMD_BYTECNT_MASK) |
   1930 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1931 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1932 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1933 					    pcix_cmd);
   1934 				}
   1935 			}
   1936 		}
   1937 		/*
   1938 		 * The quad port adapter is special; it has a PCIX-PCIX
   1939 		 * bridge on the board, and can run the secondary bus at
   1940 		 * a higher speed.
   1941 		 */
   1942 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1943 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1944 								      : 66;
   1945 		} else if (sc->sc_flags & WM_F_PCIX) {
   1946 			switch (reg & STATUS_PCIXSPD_MASK) {
   1947 			case STATUS_PCIXSPD_50_66:
   1948 				sc->sc_bus_speed = 66;
   1949 				break;
   1950 			case STATUS_PCIXSPD_66_100:
   1951 				sc->sc_bus_speed = 100;
   1952 				break;
   1953 			case STATUS_PCIXSPD_100_133:
   1954 				sc->sc_bus_speed = 133;
   1955 				break;
   1956 			default:
   1957 				aprint_error_dev(sc->sc_dev,
   1958 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1959 				    reg & STATUS_PCIXSPD_MASK);
   1960 				sc->sc_bus_speed = 66;
   1961 				break;
   1962 			}
   1963 		} else
   1964 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1965 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1966 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1967 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1968 	}
   1969 
   1970 	/* clear interesting stat counters */
   1971 	CSR_READ(sc, WMREG_COLC);
   1972 	CSR_READ(sc, WMREG_RXERRC);
   1973 
   1974 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1975 	    || (sc->sc_type >= WM_T_ICH8))
   1976 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1977 	if (sc->sc_type >= WM_T_ICH8)
   1978 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1979 
   1980 	/* Set PHY, NVM mutex related stuff */
   1981 	switch (sc->sc_type) {
   1982 	case WM_T_82542_2_0:
   1983 	case WM_T_82542_2_1:
   1984 	case WM_T_82543:
   1985 	case WM_T_82544:
   1986 		/* Microwire */
   1987 		sc->sc_nvm_wordsize = 64;
   1988 		sc->sc_nvm_addrbits = 6;
   1989 		break;
   1990 	case WM_T_82540:
   1991 	case WM_T_82545:
   1992 	case WM_T_82545_3:
   1993 	case WM_T_82546:
   1994 	case WM_T_82546_3:
   1995 		/* Microwire */
   1996 		reg = CSR_READ(sc, WMREG_EECD);
   1997 		if (reg & EECD_EE_SIZE) {
   1998 			sc->sc_nvm_wordsize = 256;
   1999 			sc->sc_nvm_addrbits = 8;
   2000 		} else {
   2001 			sc->sc_nvm_wordsize = 64;
   2002 			sc->sc_nvm_addrbits = 6;
   2003 		}
   2004 		sc->sc_flags |= WM_F_LOCK_EECD;
   2005 		break;
   2006 	case WM_T_82541:
   2007 	case WM_T_82541_2:
   2008 	case WM_T_82547:
   2009 	case WM_T_82547_2:
   2010 		sc->sc_flags |= WM_F_LOCK_EECD;
   2011 		reg = CSR_READ(sc, WMREG_EECD);
   2012 		if (reg & EECD_EE_TYPE) {
   2013 			/* SPI */
   2014 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2015 			wm_nvm_set_addrbits_size_eecd(sc);
   2016 		} else {
   2017 			/* Microwire */
   2018 			if ((reg & EECD_EE_ABITS) != 0) {
   2019 				sc->sc_nvm_wordsize = 256;
   2020 				sc->sc_nvm_addrbits = 8;
   2021 			} else {
   2022 				sc->sc_nvm_wordsize = 64;
   2023 				sc->sc_nvm_addrbits = 6;
   2024 			}
   2025 		}
   2026 		break;
   2027 	case WM_T_82571:
   2028 	case WM_T_82572:
   2029 		/* SPI */
   2030 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2031 		wm_nvm_set_addrbits_size_eecd(sc);
   2032 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2033 		sc->phy.acquire = wm_get_swsm_semaphore;
   2034 		sc->phy.release = wm_put_swsm_semaphore;
   2035 		break;
   2036 	case WM_T_82573:
   2037 	case WM_T_82574:
   2038 	case WM_T_82583:
   2039 		if (sc->sc_type == WM_T_82573) {
   2040 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2041 			sc->phy.acquire = wm_get_swsm_semaphore;
   2042 			sc->phy.release = wm_put_swsm_semaphore;
   2043 		} else {
   2044 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2045 			/* Both PHY and NVM use the same semaphore. */
    2046 			sc->phy.acquire = wm_get_swfwhw_semaphore;
    2047 			sc->phy.release = wm_put_swfwhw_semaphore;
   2050 		}
   2051 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2052 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2053 			sc->sc_nvm_wordsize = 2048;
   2054 		} else {
   2055 			/* SPI */
   2056 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2057 			wm_nvm_set_addrbits_size_eecd(sc);
   2058 		}
   2059 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2060 		break;
   2061 	case WM_T_82575:
   2062 	case WM_T_82576:
   2063 	case WM_T_82580:
   2064 	case WM_T_I350:
   2065 	case WM_T_I354:
   2066 	case WM_T_80003:
   2067 		/* SPI */
   2068 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2069 		wm_nvm_set_addrbits_size_eecd(sc);
   2070 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2071 		    | WM_F_LOCK_SWSM;
   2072 		sc->phy.acquire = wm_get_phy_82575;
   2073 		sc->phy.release = wm_put_phy_82575;
   2074 		break;
   2075 	case WM_T_ICH8:
   2076 	case WM_T_ICH9:
   2077 	case WM_T_ICH10:
   2078 	case WM_T_PCH:
   2079 	case WM_T_PCH2:
   2080 	case WM_T_PCH_LPT:
   2081 		/* FLASH */
   2082 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2083 		sc->sc_nvm_wordsize = 2048;
   2084 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2085 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2086 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2087 			aprint_error_dev(sc->sc_dev,
   2088 			    "can't map FLASH registers\n");
   2089 			goto out;
   2090 		}
   2091 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2092 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2093 		    ICH_FLASH_SECTOR_SIZE;
   2094 		sc->sc_ich8_flash_bank_size =
   2095 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2096 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2097 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2098 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
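		/*
		 * (Added note: GFPREG holds the flash base and limit in
		 * ICH_FLASH_SECTOR_SIZE units; the computation above
		 * converts the region to bytes, splits it across the two
		 * NVM banks, and expresses the per-bank size in 16-bit
		 * words.)
		 */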
   2099 		sc->sc_flashreg_offset = 0;
   2100 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2101 		sc->phy.release = wm_put_swflag_ich8lan;
   2102 		break;
   2103 	case WM_T_PCH_SPT:
   2104 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2105 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2106 		sc->sc_flasht = sc->sc_st;
   2107 		sc->sc_flashh = sc->sc_sh;
   2108 		sc->sc_ich8_flash_base = 0;
   2109 		sc->sc_nvm_wordsize =
   2110 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2111 			* NVM_SIZE_MULTIPLIER;
    2112 		/* That is the size in bytes; we want words */
   2113 		sc->sc_nvm_wordsize /= 2;
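		/*
		 * (Worked example, added for clarity: a strap field of 7
		 * yields (7 + 1) * NVM_SIZE_MULTIPLIER bytes, halved above
		 * to express the size in 16-bit words.)
		 */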
   2114 		/* assume 2 banks */
   2115 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2116 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2117 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2118 		sc->phy.release = wm_put_swflag_ich8lan;
   2119 		break;
   2120 	case WM_T_I210:
   2121 	case WM_T_I211:
   2122 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2123 			wm_nvm_set_addrbits_size_eecd(sc);
   2124 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2125 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2126 		} else {
   2127 			sc->sc_nvm_wordsize = INVM_SIZE;
   2128 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2129 		}
   2130 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2131 		sc->phy.acquire = wm_get_phy_82575;
   2132 		sc->phy.release = wm_put_phy_82575;
   2133 		break;
   2134 	default:
   2135 		break;
   2136 	}
   2137 
   2138 	/* Reset the chip to a known state. */
   2139 	wm_reset(sc);
   2140 
   2141 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2142 	switch (sc->sc_type) {
   2143 	case WM_T_82571:
   2144 	case WM_T_82572:
   2145 		reg = CSR_READ(sc, WMREG_SWSM2);
   2146 		if ((reg & SWSM2_LOCK) == 0) {
   2147 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2148 			force_clear_smbi = true;
   2149 		} else
   2150 			force_clear_smbi = false;
   2151 		break;
   2152 	case WM_T_82573:
   2153 	case WM_T_82574:
   2154 	case WM_T_82583:
   2155 		force_clear_smbi = true;
   2156 		break;
   2157 	default:
   2158 		force_clear_smbi = false;
   2159 		break;
   2160 	}
   2161 	if (force_clear_smbi) {
   2162 		reg = CSR_READ(sc, WMREG_SWSM);
   2163 		if ((reg & SWSM_SMBI) != 0)
   2164 			aprint_error_dev(sc->sc_dev,
   2165 			    "Please update the Bootagent\n");
   2166 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2167 	}
   2168 
   2169 	/*
    2170 	 * Defer printing the EEPROM type until after verifying the checksum.
   2171 	 * This allows the EEPROM type to be printed correctly in the case
   2172 	 * that no EEPROM is attached.
   2173 	 */
   2174 	/*
   2175 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2176 	 * this for later, so we can fail future reads from the EEPROM.
   2177 	 */
   2178 	if (wm_nvm_validate_checksum(sc)) {
   2179 		/*
    2180 		 * Check again, because some PCI-e parts fail the
    2181 		 * first check due to the link being in a sleep state.
   2182 		 */
   2183 		if (wm_nvm_validate_checksum(sc))
   2184 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2185 	}
   2186 
   2187 	/* Set device properties (macflags) */
   2188 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2189 
   2190 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2191 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2192 	else {
   2193 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2194 		    sc->sc_nvm_wordsize);
   2195 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2196 			aprint_verbose("iNVM");
   2197 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2198 			aprint_verbose("FLASH(HW)");
   2199 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2200 			aprint_verbose("FLASH");
   2201 		else {
   2202 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2203 				eetype = "SPI";
   2204 			else
   2205 				eetype = "MicroWire";
   2206 			aprint_verbose("(%d address bits) %s EEPROM",
   2207 			    sc->sc_nvm_addrbits, eetype);
   2208 		}
   2209 	}
   2210 	wm_nvm_version(sc);
   2211 	aprint_verbose("\n");
   2212 
   2213 	/* Check for I21[01] PLL workaround */
   2214 	if (sc->sc_type == WM_T_I210)
   2215 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2216 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2217 		/* NVM image release 3.25 has a workaround */
   2218 		if ((sc->sc_nvm_ver_major < 3)
   2219 		    || ((sc->sc_nvm_ver_major == 3)
   2220 			&& (sc->sc_nvm_ver_minor < 25))) {
   2221 			aprint_verbose_dev(sc->sc_dev,
   2222 			    "ROM image version %d.%d is older than 3.25\n",
   2223 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2224 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2225 		}
   2226 	}
   2227 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2228 		wm_pll_workaround_i210(sc);
   2229 
   2230 	wm_get_wakeup(sc);
   2231 
   2232 	/* Non-AMT based hardware can now take control from firmware */
   2233 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2234 		wm_get_hw_control(sc);
   2235 
   2236 	/*
    2237 	 * Read the Ethernet address from the EEPROM if it was not
    2238 	 * found first in the device properties.
   2239 	 */
   2240 	ea = prop_dictionary_get(dict, "mac-address");
   2241 	if (ea != NULL) {
   2242 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2243 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2244 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2245 	} else {
   2246 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2247 			aprint_error_dev(sc->sc_dev,
   2248 			    "unable to read Ethernet address\n");
   2249 			goto out;
   2250 		}
   2251 	}
   2252 
   2253 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2254 	    ether_sprintf(enaddr));
   2255 
   2256 	/*
   2257 	 * Read the config info from the EEPROM, and set up various
   2258 	 * bits in the control registers based on their contents.
   2259 	 */
   2260 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2261 	if (pn != NULL) {
   2262 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2263 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2264 	} else {
   2265 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2266 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2267 			goto out;
   2268 		}
   2269 	}
   2270 
   2271 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2272 	if (pn != NULL) {
   2273 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2274 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2275 	} else {
   2276 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2277 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2278 			goto out;
   2279 		}
   2280 	}
   2281 
   2282 	/* check for WM_F_WOL */
   2283 	switch (sc->sc_type) {
   2284 	case WM_T_82542_2_0:
   2285 	case WM_T_82542_2_1:
   2286 	case WM_T_82543:
   2287 		/* dummy? */
   2288 		eeprom_data = 0;
   2289 		apme_mask = NVM_CFG3_APME;
   2290 		break;
   2291 	case WM_T_82544:
   2292 		apme_mask = NVM_CFG2_82544_APM_EN;
   2293 		eeprom_data = cfg2;
   2294 		break;
   2295 	case WM_T_82546:
   2296 	case WM_T_82546_3:
   2297 	case WM_T_82571:
   2298 	case WM_T_82572:
   2299 	case WM_T_82573:
   2300 	case WM_T_82574:
   2301 	case WM_T_82583:
   2302 	case WM_T_80003:
   2303 	default:
   2304 		apme_mask = NVM_CFG3_APME;
   2305 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2306 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2307 		break;
   2308 	case WM_T_82575:
   2309 	case WM_T_82576:
   2310 	case WM_T_82580:
   2311 	case WM_T_I350:
   2312 	case WM_T_I354: /* XXX ok? */
   2313 	case WM_T_ICH8:
   2314 	case WM_T_ICH9:
   2315 	case WM_T_ICH10:
   2316 	case WM_T_PCH:
   2317 	case WM_T_PCH2:
   2318 	case WM_T_PCH_LPT:
   2319 	case WM_T_PCH_SPT:
   2320 		/* XXX The funcid should be checked on some devices */
   2321 		apme_mask = WUC_APME;
   2322 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2323 		break;
   2324 	}
   2325 
   2326 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2327 	if ((eeprom_data & apme_mask) != 0)
   2328 		sc->sc_flags |= WM_F_WOL;
   2329 #ifdef WM_DEBUG
   2330 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2331 		printf("WOL\n");
   2332 #endif
   2333 
   2334 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2335 		/* Check NVM for autonegotiation */
   2336 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2337 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2338 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2339 		}
   2340 	}
   2341 
   2342 	/*
    2343 	 * XXX need special handling for some multiple-port cards
    2344 	 * to disable a particular port.
   2345 	 */
   2346 
   2347 	if (sc->sc_type >= WM_T_82544) {
   2348 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2349 		if (pn != NULL) {
   2350 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2351 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2352 		} else {
   2353 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2354 				aprint_error_dev(sc->sc_dev,
   2355 				    "unable to read SWDPIN\n");
   2356 				goto out;
   2357 			}
   2358 		}
   2359 	}
   2360 
   2361 	if (cfg1 & NVM_CFG1_ILOS)
   2362 		sc->sc_ctrl |= CTRL_ILOS;
   2363 
   2364 	/*
   2365 	 * XXX
    2366 	 * This code isn't correct because pins 2 and 3 are located in
    2367 	 * different positions on newer chips. Check all the datasheets.
    2368 	 *
    2369 	 * Until this is resolved, restrict it to chips up to the 82580.
   2370 	 */
   2371 	if (sc->sc_type <= WM_T_82580) {
   2372 		if (sc->sc_type >= WM_T_82544) {
   2373 			sc->sc_ctrl |=
   2374 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2375 			    CTRL_SWDPIO_SHIFT;
   2376 			sc->sc_ctrl |=
   2377 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2378 			    CTRL_SWDPINS_SHIFT;
   2379 		} else {
   2380 			sc->sc_ctrl |=
   2381 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2382 			    CTRL_SWDPIO_SHIFT;
   2383 		}
   2384 	}
   2385 
   2386 	/* XXX For other than 82580? */
   2387 	if (sc->sc_type == WM_T_82580) {
   2388 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2389 		if (nvmword & __BIT(13))
   2390 			sc->sc_ctrl |= CTRL_ILOS;
   2391 	}
   2392 
   2393 #if 0
   2394 	if (sc->sc_type >= WM_T_82544) {
   2395 		if (cfg1 & NVM_CFG1_IPS0)
   2396 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2397 		if (cfg1 & NVM_CFG1_IPS1)
   2398 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2399 		sc->sc_ctrl_ext |=
   2400 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2401 		    CTRL_EXT_SWDPIO_SHIFT;
   2402 		sc->sc_ctrl_ext |=
   2403 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2404 		    CTRL_EXT_SWDPINS_SHIFT;
   2405 	} else {
   2406 		sc->sc_ctrl_ext |=
   2407 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2408 		    CTRL_EXT_SWDPIO_SHIFT;
   2409 	}
   2410 #endif
   2411 
   2412 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2413 #if 0
   2414 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2415 #endif
   2416 
   2417 	if (sc->sc_type == WM_T_PCH) {
   2418 		uint16_t val;
   2419 
   2420 		/* Save the NVM K1 bit setting */
   2421 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2422 
   2423 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2424 			sc->sc_nvm_k1_enabled = 1;
   2425 		else
   2426 			sc->sc_nvm_k1_enabled = 0;
   2427 	}
   2428 
   2429 	/*
    2430 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2431 	 * media structures accordingly.
   2432 	 */
   2433 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2434 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2435 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2436 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2437 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2438 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2439 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2440 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2441 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2442 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2443 	    || (sc->sc_type == WM_T_I211)) {
   2444 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2445 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2446 		switch (link_mode) {
   2447 		case CTRL_EXT_LINK_MODE_1000KX:
   2448 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2449 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2450 			break;
   2451 		case CTRL_EXT_LINK_MODE_SGMII:
   2452 			if (wm_sgmii_uses_mdio(sc)) {
   2453 				aprint_verbose_dev(sc->sc_dev,
   2454 				    "SGMII(MDIO)\n");
   2455 				sc->sc_flags |= WM_F_SGMII;
   2456 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2457 				break;
   2458 			}
   2459 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2460 			/*FALLTHROUGH*/
   2461 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2462 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2463 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2464 				if (link_mode
   2465 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2466 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2467 					sc->sc_flags |= WM_F_SGMII;
   2468 				} else {
   2469 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2470 					aprint_verbose_dev(sc->sc_dev,
   2471 					    "SERDES\n");
   2472 				}
   2473 				break;
   2474 			}
   2475 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2476 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2477 
   2478 			/* Change current link mode setting */
   2479 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2480 			switch (sc->sc_mediatype) {
   2481 			case WM_MEDIATYPE_COPPER:
   2482 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2483 				break;
   2484 			case WM_MEDIATYPE_SERDES:
   2485 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2486 				break;
   2487 			default:
   2488 				break;
   2489 			}
   2490 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2491 			break;
   2492 		case CTRL_EXT_LINK_MODE_GMII:
   2493 		default:
   2494 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2495 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2496 			break;
   2497 		}
   2498 
    2500 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2501 			reg |= CTRL_EXT_I2C_ENA;
    2502 		else
    2503 			reg &= ~CTRL_EXT_I2C_ENA;
   2504 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2505 
   2506 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2507 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2508 		else
   2509 			wm_tbi_mediainit(sc);
   2510 	} else if (sc->sc_type < WM_T_82543 ||
   2511 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2512 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2513 			aprint_error_dev(sc->sc_dev,
   2514 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2515 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2516 		}
   2517 		wm_tbi_mediainit(sc);
   2518 	} else {
   2519 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2520 			aprint_error_dev(sc->sc_dev,
   2521 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2522 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2523 		}
   2524 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2525 	}
   2526 
   2527 	ifp = &sc->sc_ethercom.ec_if;
   2528 	xname = device_xname(sc->sc_dev);
   2529 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2530 	ifp->if_softc = sc;
   2531 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2532 	ifp->if_extflags = IFEF_START_MPSAFE;
   2533 	ifp->if_ioctl = wm_ioctl;
   2534 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2535 		ifp->if_start = wm_nq_start;
   2536 		if (sc->sc_nqueues > 1) {
   2537 			ifp->if_transmit = wm_nq_transmit;
   2538 			deferred_start_func = wm_deferred_start;
   2539 		}
   2540 	} else {
   2541 		ifp->if_start = wm_start;
   2542 		if (sc->sc_nqueues > 1) {
   2543 			ifp->if_transmit = wm_transmit;
   2544 			deferred_start_func = wm_deferred_start;
   2545 		}
   2546 	}
   2547 	ifp->if_watchdog = wm_watchdog;
   2548 	ifp->if_init = wm_init;
   2549 	ifp->if_stop = wm_stop;
   2550 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2551 	IFQ_SET_READY(&ifp->if_snd);
   2552 
   2553 	/* Check for jumbo frame */
   2554 	switch (sc->sc_type) {
   2555 	case WM_T_82573:
   2556 		/* XXX limited to 9234 if ASPM is disabled */
   2557 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2558 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2559 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2560 		break;
   2561 	case WM_T_82571:
   2562 	case WM_T_82572:
   2563 	case WM_T_82574:
   2564 	case WM_T_82575:
   2565 	case WM_T_82576:
   2566 	case WM_T_82580:
   2567 	case WM_T_I350:
   2568 	case WM_T_I354: /* XXXX ok? */
   2569 	case WM_T_I210:
   2570 	case WM_T_I211:
   2571 	case WM_T_80003:
   2572 	case WM_T_ICH9:
   2573 	case WM_T_ICH10:
   2574 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2575 	case WM_T_PCH_LPT:
   2576 	case WM_T_PCH_SPT:
   2577 		/* XXX limited to 9234 */
   2578 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2579 		break;
   2580 	case WM_T_PCH:
   2581 		/* XXX limited to 4096 */
   2582 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2583 		break;
   2584 	case WM_T_82542_2_0:
   2585 	case WM_T_82542_2_1:
   2586 	case WM_T_82583:
   2587 	case WM_T_ICH8:
   2588 		/* No support for jumbo frame */
   2589 		break;
   2590 	default:
   2591 		/* ETHER_MAX_LEN_JUMBO */
   2592 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2593 		break;
   2594 	}
   2595 
    2596 	/* If we're an i82543 or greater, we can support VLANs. */
   2597 	if (sc->sc_type >= WM_T_82543)
   2598 		sc->sc_ethercom.ec_capabilities |=
   2599 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2600 
   2601 	/*
    2602 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2603 	 * on i82543 and later.
   2604 	 */
   2605 	if (sc->sc_type >= WM_T_82543) {
   2606 		ifp->if_capabilities |=
   2607 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2608 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2609 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2610 		    IFCAP_CSUM_TCPv6_Tx |
   2611 		    IFCAP_CSUM_UDPv6_Tx;
   2612 	}
   2613 
   2614 	/*
   2615 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2616 	 *
   2617 	 *	82541GI (8086:1076) ... no
   2618 	 *	82572EI (8086:10b9) ... yes
   2619 	 */
   2620 	if (sc->sc_type >= WM_T_82571) {
   2621 		ifp->if_capabilities |=
   2622 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2623 	}
   2624 
   2625 	/*
    2626 	 * If we're an i82544 or greater (except i82547), we can do
   2627 	 * TCP segmentation offload.
   2628 	 */
   2629 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2630 		ifp->if_capabilities |= IFCAP_TSOv4;
   2631 	}
   2632 
   2633 	if (sc->sc_type >= WM_T_82571) {
   2634 		ifp->if_capabilities |= IFCAP_TSOv6;
   2635 	}
   2636 
   2637 #ifdef WM_MPSAFE
   2638 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2639 #else
   2640 	sc->sc_core_lock = NULL;
   2641 #endif
   2642 
   2643 	/* Attach the interface. */
   2644 	if_initialize(ifp);
   2645 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2646 	if_deferred_start_init(ifp, deferred_start_func);
   2647 	ether_ifattach(ifp, enaddr);
   2648 	if_register(ifp);
   2649 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2650 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2651 			  RND_FLAG_DEFAULT);
   2652 
   2653 #ifdef WM_EVENT_COUNTERS
   2654 	/* Attach event counters. */
   2655 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2656 	    NULL, xname, "linkintr");
   2657 
   2658 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2659 	    NULL, xname, "tx_xoff");
   2660 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2661 	    NULL, xname, "tx_xon");
   2662 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2663 	    NULL, xname, "rx_xoff");
   2664 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2665 	    NULL, xname, "rx_xon");
   2666 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2667 	    NULL, xname, "rx_macctl");
   2668 #endif /* WM_EVENT_COUNTERS */
   2669 
   2670 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2671 		pmf_class_network_register(self, ifp);
   2672 	else
   2673 		aprint_error_dev(self, "couldn't establish power handler\n");
   2674 
   2675 	sc->sc_flags |= WM_F_ATTACHED;
   2676  out:
   2677 	return;
   2678 }
   2679 
   2680 /* The detach function (ca_detach) */
   2681 static int
   2682 wm_detach(device_t self, int flags __unused)
   2683 {
   2684 	struct wm_softc *sc = device_private(self);
   2685 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2686 	int i;
   2687 
   2688 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2689 		return 0;
   2690 
    2691 	/* Stop the interface. The callouts are stopped in wm_stop(). */
   2692 	wm_stop(ifp, 1);
   2693 
   2694 	pmf_device_deregister(self);
   2695 
   2696 	/* Tell the firmware about the release */
   2697 	WM_CORE_LOCK(sc);
   2698 	wm_release_manageability(sc);
   2699 	wm_release_hw_control(sc);
   2700 	wm_enable_wakeup(sc);
   2701 	WM_CORE_UNLOCK(sc);
   2702 
   2703 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2704 
   2705 	/* Delete all remaining media. */
   2706 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2707 
   2708 	ether_ifdetach(ifp);
   2709 	if_detach(ifp);
   2710 	if_percpuq_destroy(sc->sc_ipq);
   2711 
   2712 	/* Unload RX dmamaps and free mbufs */
   2713 	for (i = 0; i < sc->sc_nqueues; i++) {
   2714 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2715 		mutex_enter(rxq->rxq_lock);
   2716 		wm_rxdrain(rxq);
   2717 		mutex_exit(rxq->rxq_lock);
   2718 	}
   2719 	/* Must unlock here */
   2720 
   2721 	/* Disestablish the interrupt handler */
   2722 	for (i = 0; i < sc->sc_nintrs; i++) {
   2723 		if (sc->sc_ihs[i] != NULL) {
   2724 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2725 			sc->sc_ihs[i] = NULL;
   2726 		}
   2727 	}
   2728 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2729 
   2730 	wm_free_txrx_queues(sc);
   2731 
   2732 	/* Unmap the registers */
   2733 	if (sc->sc_ss) {
   2734 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2735 		sc->sc_ss = 0;
   2736 	}
   2737 	if (sc->sc_ios) {
   2738 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2739 		sc->sc_ios = 0;
   2740 	}
   2741 	if (sc->sc_flashs) {
   2742 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2743 		sc->sc_flashs = 0;
   2744 	}
   2745 
   2746 	if (sc->sc_core_lock)
   2747 		mutex_obj_free(sc->sc_core_lock);
   2748 	if (sc->sc_ich_phymtx)
   2749 		mutex_obj_free(sc->sc_ich_phymtx);
   2750 	if (sc->sc_ich_nvmmtx)
   2751 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2752 
   2753 	return 0;
   2754 }
   2755 
   2756 static bool
   2757 wm_suspend(device_t self, const pmf_qual_t *qual)
   2758 {
   2759 	struct wm_softc *sc = device_private(self);
   2760 
   2761 	wm_release_manageability(sc);
   2762 	wm_release_hw_control(sc);
   2763 	wm_enable_wakeup(sc);
   2764 
   2765 	return true;
   2766 }
   2767 
   2768 static bool
   2769 wm_resume(device_t self, const pmf_qual_t *qual)
   2770 {
   2771 	struct wm_softc *sc = device_private(self);
   2772 
   2773 	wm_init_manageability(sc);
   2774 
   2775 	return true;
   2776 }
   2777 
   2778 /*
   2779  * wm_watchdog:		[ifnet interface function]
   2780  *
   2781  *	Watchdog timer handler.
   2782  */
   2783 static void
   2784 wm_watchdog(struct ifnet *ifp)
   2785 {
   2786 	int qid;
   2787 	struct wm_softc *sc = ifp->if_softc;
   2788 
   2789 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2790 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2791 
   2792 		wm_watchdog_txq(ifp, txq);
   2793 	}
   2794 
   2795 	/* Reset the interface. */
   2796 	(void) wm_init(ifp);
   2797 
   2798 	/*
    2799 	 * There is still some upper-layer processing that calls
    2800 	 * ifp->if_start(), e.g. ALTQ.
   2801 	 */
   2802 	/* Try to get more packets going. */
   2803 	ifp->if_start(ifp);
   2804 }
   2805 
   2806 static void
   2807 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2808 {
   2809 	struct wm_softc *sc = ifp->if_softc;
   2810 
   2811 	/*
   2812 	 * Since we're using delayed interrupts, sweep up
   2813 	 * before we report an error.
   2814 	 */
   2815 	mutex_enter(txq->txq_lock);
   2816 	wm_txeof(sc, txq);
   2817 	mutex_exit(txq->txq_lock);
   2818 
   2819 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2820 #ifdef WM_DEBUG
   2821 		int i, j;
   2822 		struct wm_txsoft *txs;
   2823 #endif
   2824 		log(LOG_ERR,
   2825 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2826 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2827 		    txq->txq_next);
   2828 		ifp->if_oerrors++;
   2829 #ifdef WM_DEBUG
    2830 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2831 		    i = WM_NEXTTXS(txq, i)) {
    2832 			txs = &txq->txq_soft[i];
    2833 			printf("txs %d tx %d -> %d\n",
    2834 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2835 			for (j = txs->txs_firstdesc; ;
    2836 			    j = WM_NEXTTX(txq, j)) {
    2837 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2838 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2839 				printf("\t %#08x%08x\n",
    2840 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2841 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2842 				if (j == txs->txs_lastdesc)
    2843 					break;
    2844 			}
    2845 		}
   2846 #endif
   2847 	}
   2848 }
   2849 
   2850 /*
   2851  * wm_tick:
   2852  *
   2853  *	One second timer, used to check link status, sweep up
   2854  *	completed transmit jobs, etc.
   2855  */
   2856 static void
   2857 wm_tick(void *arg)
   2858 {
   2859 	struct wm_softc *sc = arg;
   2860 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2861 #ifndef WM_MPSAFE
   2862 	int s = splnet();
   2863 #endif
   2864 
   2865 	WM_CORE_LOCK(sc);
   2866 
   2867 	if (sc->sc_core_stopping)
   2868 		goto out;
   2869 
   2870 	if (sc->sc_type >= WM_T_82542_2_1) {
   2871 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2872 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2873 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2874 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2875 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2876 	}
   2877 
   2878 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2879 	ifp->if_ierrors += 0ULL /* ensure quad_t */
    2880 	    + CSR_READ(sc, WMREG_CRCERRS)
   2881 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2882 	    + CSR_READ(sc, WMREG_SYMERRC)
   2883 	    + CSR_READ(sc, WMREG_RXERRC)
   2884 	    + CSR_READ(sc, WMREG_SEC)
   2885 	    + CSR_READ(sc, WMREG_CEXTERR)
   2886 	    + CSR_READ(sc, WMREG_RLEC);
    2887 	/*
    2888 	 * WMREG_RNBC is incremented when there are no available buffers
    2889 	 * in host memory. It does not count dropped packets, because the
    2890 	 * Ethernet controller can still receive packets in that case as
    2891 	 * long as there is space in the PHY's FIFO.
    2892 	 *
    2893 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT instead
    2894 	 * of if_iqdrops.
    2895 	 */
   2896 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2897 
   2898 	if (sc->sc_flags & WM_F_HAS_MII)
   2899 		mii_tick(&sc->sc_mii);
   2900 	else if ((sc->sc_type >= WM_T_82575)
   2901 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2902 		wm_serdes_tick(sc);
   2903 	else
   2904 		wm_tbi_tick(sc);
   2905 
   2906 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2907 out:
   2908 	WM_CORE_UNLOCK(sc);
   2909 #ifndef WM_MPSAFE
   2910 	splx(s);
   2911 #endif
   2912 }
   2913 
   2914 static int
   2915 wm_ifflags_cb(struct ethercom *ec)
   2916 {
   2917 	struct ifnet *ifp = &ec->ec_if;
   2918 	struct wm_softc *sc = ifp->if_softc;
   2919 	int rc = 0;
   2920 
   2921 	WM_CORE_LOCK(sc);
   2922 
   2923 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2924 	sc->sc_if_flags = ifp->if_flags;
   2925 
   2926 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2927 		rc = ENETRESET;
   2928 		goto out;
   2929 	}
   2930 
   2931 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2932 		wm_set_filter(sc);
   2933 
   2934 	wm_set_vlan(sc);
   2935 
   2936 out:
   2937 	WM_CORE_UNLOCK(sc);
   2938 
   2939 	return rc;
   2940 }
   2941 
   2942 /*
   2943  * wm_ioctl:		[ifnet interface function]
   2944  *
   2945  *	Handle control requests from the operator.
   2946  */
   2947 static int
   2948 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2949 {
   2950 	struct wm_softc *sc = ifp->if_softc;
   2951 	struct ifreq *ifr = (struct ifreq *) data;
   2952 	struct ifaddr *ifa = (struct ifaddr *)data;
   2953 	struct sockaddr_dl *sdl;
   2954 	int s, error;
   2955 
   2956 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2957 		device_xname(sc->sc_dev), __func__));
   2958 
   2959 #ifndef WM_MPSAFE
   2960 	s = splnet();
   2961 #endif
   2962 	switch (cmd) {
   2963 	case SIOCSIFMEDIA:
   2964 	case SIOCGIFMEDIA:
   2965 		WM_CORE_LOCK(sc);
   2966 		/* Flow control requires full-duplex mode. */
   2967 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2968 		    (ifr->ifr_media & IFM_FDX) == 0)
   2969 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2970 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2971 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2972 				/* We can do both TXPAUSE and RXPAUSE. */
   2973 				ifr->ifr_media |=
   2974 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2975 			}
   2976 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2977 		}
   2978 		WM_CORE_UNLOCK(sc);
   2979 #ifdef WM_MPSAFE
   2980 		s = splnet();
   2981 #endif
   2982 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2983 #ifdef WM_MPSAFE
   2984 		splx(s);
   2985 #endif
   2986 		break;
   2987 	case SIOCINITIFADDR:
   2988 		WM_CORE_LOCK(sc);
   2989 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2990 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2991 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2992 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2993 			/* unicast address is first multicast entry */
   2994 			wm_set_filter(sc);
   2995 			error = 0;
   2996 			WM_CORE_UNLOCK(sc);
   2997 			break;
   2998 		}
   2999 		WM_CORE_UNLOCK(sc);
   3000 		/*FALLTHROUGH*/
   3001 	default:
   3002 #ifdef WM_MPSAFE
   3003 		s = splnet();
   3004 #endif
   3005 		/* It may call wm_start, so unlock here */
   3006 		error = ether_ioctl(ifp, cmd, data);
   3007 #ifdef WM_MPSAFE
   3008 		splx(s);
   3009 #endif
   3010 		if (error != ENETRESET)
   3011 			break;
   3012 
   3013 		error = 0;
   3014 
   3015 		if (cmd == SIOCSIFCAP) {
   3016 			error = (*ifp->if_init)(ifp);
   3017 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3018 			;
   3019 		else if (ifp->if_flags & IFF_RUNNING) {
   3020 			/*
   3021 			 * Multicast list has changed; set the hardware filter
   3022 			 * accordingly.
   3023 			 */
   3024 			WM_CORE_LOCK(sc);
   3025 			wm_set_filter(sc);
   3026 			WM_CORE_UNLOCK(sc);
   3027 		}
   3028 		break;
   3029 	}
   3030 
   3031 #ifndef WM_MPSAFE
   3032 	splx(s);
   3033 #endif
   3034 	return error;
   3035 }
   3036 
   3037 /* MAC address related */
   3038 
    3039 /*
    3040  * Get the offset of the MAC address and return it.
    3041  * If an error occurs, use offset 0.
    3042  */
   3043 static uint16_t
   3044 wm_check_alt_mac_addr(struct wm_softc *sc)
   3045 {
   3046 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3047 	uint16_t offset = NVM_OFF_MACADDR;
   3048 
   3049 	/* Try to read alternative MAC address pointer */
   3050 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3051 		return 0;
   3052 
    3053 	/* Check whether the pointer is valid. */
   3054 	if ((offset == 0x0000) || (offset == 0xffff))
   3055 		return 0;
   3056 
   3057 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3058 	/*
    3059 	 * Check whether the alternative MAC address is valid.
    3060 	 * Some cards have a pointer other than 0xffff but don't actually
    3061 	 * use an alternative MAC address.
    3062 	 *
    3063 	 * A valid unicast address has the multicast/broadcast bit clear.
    3064 	 */
   3065 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3066 		if (((myea[0] & 0xff) & 0x01) == 0)
   3067 			return offset; /* Found */
   3068 
   3069 	/* Not found */
   3070 	return 0;
   3071 }
   3072 
   3073 static int
   3074 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3075 {
   3076 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3077 	uint16_t offset = NVM_OFF_MACADDR;
   3078 	int do_invert = 0;
   3079 
   3080 	switch (sc->sc_type) {
   3081 	case WM_T_82580:
   3082 	case WM_T_I350:
   3083 	case WM_T_I354:
   3084 		/* EEPROM Top Level Partitioning */
   3085 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3086 		break;
   3087 	case WM_T_82571:
   3088 	case WM_T_82575:
   3089 	case WM_T_82576:
   3090 	case WM_T_80003:
   3091 	case WM_T_I210:
   3092 	case WM_T_I211:
   3093 		offset = wm_check_alt_mac_addr(sc);
   3094 		if (offset == 0)
   3095 			if ((sc->sc_funcid & 0x01) == 1)
   3096 				do_invert = 1;
   3097 		break;
   3098 	default:
   3099 		if ((sc->sc_funcid & 0x01) == 1)
   3100 			do_invert = 1;
   3101 		break;
   3102 	}
   3103 
   3104 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3105 		goto bad;
   3106 
   3107 	enaddr[0] = myea[0] & 0xff;
   3108 	enaddr[1] = myea[0] >> 8;
   3109 	enaddr[2] = myea[1] & 0xff;
   3110 	enaddr[3] = myea[1] >> 8;
   3111 	enaddr[4] = myea[2] & 0xff;
   3112 	enaddr[5] = myea[2] >> 8;
   3113 
   3114 	/*
   3115 	 * Toggle the LSB of the MAC address on the second port
   3116 	 * of some dual port cards.
   3117 	 */
   3118 	if (do_invert != 0)
   3119 		enaddr[5] ^= 1;
   3120 
   3121 	return 0;
   3122 
   3123  bad:
   3124 	return -1;
   3125 }
   3126 
   3127 /*
   3128  * wm_set_ral:
   3129  *
    3130  *	Set an entry in the receive address list.
   3131  */
   3132 static void
   3133 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3134 {
   3135 	uint32_t ral_lo, ral_hi;
   3136 
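	/*
	 * The entry is split across two 32-bit registers: bytes 0-3 of
	 * the address go into RAL_LO (little-endian) and bytes 4-5 into
	 * RAL_HI, together with the Address Valid bit.
	 */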
   3137 	if (enaddr != NULL) {
   3138 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3139 		    (enaddr[3] << 24);
   3140 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3141 		ral_hi |= RAL_AV;
   3142 	} else {
   3143 		ral_lo = 0;
   3144 		ral_hi = 0;
   3145 	}
   3146 
   3147 	if (sc->sc_type >= WM_T_82544) {
   3148 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3149 		    ral_lo);
   3150 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3151 		    ral_hi);
   3152 	} else {
   3153 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3154 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3155 	}
   3156 }
   3157 
   3158 /*
   3159  * wm_mchash:
   3160  *
   3161  *	Compute the hash of the multicast address for the 4096-bit
   3162  *	multicast filter.
   3163  */
   3164 static uint32_t
   3165 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3166 {
   3167 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3168 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3169 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3170 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3171 	uint32_t hash;
   3172 
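	/*
	 * The hash is formed from enaddr[4] and enaddr[5]: sc_mchash_type
	 * selects, via the shift tables above, which window of those two
	 * octets is used, and the result is masked to 12 bits (10 bits on
	 * the ICH/PCH variants).
	 */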
   3173 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3174 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3175 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3176 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3177 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3178 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3179 		return (hash & 0x3ff);
   3180 	}
   3181 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3182 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3183 
   3184 	return (hash & 0xfff);
   3185 }
   3186 
   3187 /*
   3188  * wm_set_filter:
   3189  *
   3190  *	Set up the receive filter.
   3191  */
   3192 static void
   3193 wm_set_filter(struct wm_softc *sc)
   3194 {
   3195 	struct ethercom *ec = &sc->sc_ethercom;
   3196 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3197 	struct ether_multi *enm;
   3198 	struct ether_multistep step;
   3199 	bus_addr_t mta_reg;
   3200 	uint32_t hash, reg, bit;
   3201 	int i, size, ralmax;
   3202 
   3203 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3204 		device_xname(sc->sc_dev), __func__));
   3205 
   3206 	if (sc->sc_type >= WM_T_82544)
   3207 		mta_reg = WMREG_CORDOVA_MTA;
   3208 	else
   3209 		mta_reg = WMREG_MTA;
   3210 
   3211 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3212 
   3213 	if (ifp->if_flags & IFF_BROADCAST)
   3214 		sc->sc_rctl |= RCTL_BAM;
   3215 	if (ifp->if_flags & IFF_PROMISC) {
   3216 		sc->sc_rctl |= RCTL_UPE;
   3217 		goto allmulti;
   3218 	}
   3219 
   3220 	/*
   3221 	 * Set the station address in the first RAL slot, and
   3222 	 * clear the remaining slots.
   3223 	 */
   3224 	if (sc->sc_type == WM_T_ICH8)
    3225 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3226 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3227 	    || (sc->sc_type == WM_T_PCH))
   3228 		size = WM_RAL_TABSIZE_ICH8;
   3229 	else if (sc->sc_type == WM_T_PCH2)
   3230 		size = WM_RAL_TABSIZE_PCH2;
    3231 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3232 		size = WM_RAL_TABSIZE_PCH_LPT;
   3233 	else if (sc->sc_type == WM_T_82575)
   3234 		size = WM_RAL_TABSIZE_82575;
   3235 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3236 		size = WM_RAL_TABSIZE_82576;
   3237 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3238 		size = WM_RAL_TABSIZE_I350;
   3239 	else
   3240 		size = WM_RAL_TABSIZE;
   3241 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3242 
   3243 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3244 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3245 		switch (i) {
   3246 		case 0:
   3247 			/* We can use all entries */
   3248 			ralmax = size;
   3249 			break;
   3250 		case 1:
   3251 			/* Only RAR[0] */
   3252 			ralmax = 1;
   3253 			break;
   3254 		default:
   3255 			/* available SHRA + RAR[0] */
   3256 			ralmax = i + 1;
   3257 		}
   3258 	} else
   3259 		ralmax = size;
   3260 	for (i = 1; i < size; i++) {
   3261 		if (i < ralmax)
   3262 			wm_set_ral(sc, NULL, i);
   3263 	}
   3264 
   3265 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3266 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3267 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3268 	    || (sc->sc_type == WM_T_PCH_SPT))
   3269 		size = WM_ICH8_MC_TABSIZE;
   3270 	else
   3271 		size = WM_MC_TABSIZE;
   3272 	/* Clear out the multicast table. */
   3273 	for (i = 0; i < size; i++)
   3274 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3275 
   3276 	ETHER_LOCK(ec);
   3277 	ETHER_FIRST_MULTI(step, ec, enm);
   3278 	while (enm != NULL) {
   3279 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3280 			ETHER_UNLOCK(ec);
   3281 			/*
   3282 			 * We must listen to a range of multicast addresses.
   3283 			 * For now, just accept all multicasts, rather than
   3284 			 * trying to set only those filter bits needed to match
   3285 			 * the range.  (At this time, the only use of address
   3286 			 * ranges is for IP multicast routing, for which the
   3287 			 * range is big enough to require all bits set.)
   3288 			 */
   3289 			goto allmulti;
   3290 		}
   3291 
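		/*
		 * The multicast table is a 4096-bit (1024-bit on ICH/PCH)
		 * bit vector: the upper bits of the hash select one of the
		 * 32-bit MTA registers and the low five bits select the bit
		 * within that register.
		 */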
   3292 		hash = wm_mchash(sc, enm->enm_addrlo);
   3293 
   3294 		reg = (hash >> 5);
   3295 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3296 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3297 		    || (sc->sc_type == WM_T_PCH2)
   3298 		    || (sc->sc_type == WM_T_PCH_LPT)
   3299 		    || (sc->sc_type == WM_T_PCH_SPT))
   3300 			reg &= 0x1f;
   3301 		else
   3302 			reg &= 0x7f;
   3303 		bit = hash & 0x1f;
   3304 
   3305 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3306 		hash |= 1U << bit;
   3307 
   3308 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3309 			/*
    3310 			 * 82544 Errata 9: Certain registers cannot be written
   3311 			 * with particular alignments in PCI-X bus operation
   3312 			 * (FCAH, MTA and VFTA).
   3313 			 */
   3314 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3315 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3316 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3317 		} else
   3318 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3319 
   3320 		ETHER_NEXT_MULTI(step, enm);
   3321 	}
   3322 	ETHER_UNLOCK(ec);
   3323 
   3324 	ifp->if_flags &= ~IFF_ALLMULTI;
   3325 	goto setit;
   3326 
   3327  allmulti:
   3328 	ifp->if_flags |= IFF_ALLMULTI;
   3329 	sc->sc_rctl |= RCTL_MPE;
   3330 
   3331  setit:
   3332 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3333 }
   3334 
   3335 /* Reset and init related */
   3336 
   3337 static void
   3338 wm_set_vlan(struct wm_softc *sc)
   3339 {
   3340 
   3341 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3342 		device_xname(sc->sc_dev), __func__));
   3343 
   3344 	/* Deal with VLAN enables. */
   3345 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3346 		sc->sc_ctrl |= CTRL_VME;
   3347 	else
   3348 		sc->sc_ctrl &= ~CTRL_VME;
   3349 
   3350 	/* Write the control registers. */
   3351 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3352 }
   3353 
   3354 static void
   3355 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3356 {
   3357 	uint32_t gcr;
   3358 	pcireg_t ctrl2;
   3359 
   3360 	gcr = CSR_READ(sc, WMREG_GCR);
   3361 
   3362 	/* Only take action if timeout value is defaulted to 0 */
   3363 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3364 		goto out;
   3365 
   3366 	if ((gcr & GCR_CAP_VER2) == 0) {
   3367 		gcr |= GCR_CMPL_TMOUT_10MS;
   3368 		goto out;
   3369 	}
   3370 
   3371 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3372 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3373 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3374 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3375 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3376 
   3377 out:
   3378 	/* Disable completion timeout resend */
   3379 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3380 
   3381 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3382 }
   3383 
   3384 void
   3385 wm_get_auto_rd_done(struct wm_softc *sc)
   3386 {
   3387 	int i;
   3388 
    3389 	/* Wait for eeprom to reload */
   3390 	switch (sc->sc_type) {
   3391 	case WM_T_82571:
   3392 	case WM_T_82572:
   3393 	case WM_T_82573:
   3394 	case WM_T_82574:
   3395 	case WM_T_82583:
   3396 	case WM_T_82575:
   3397 	case WM_T_82576:
   3398 	case WM_T_82580:
   3399 	case WM_T_I350:
   3400 	case WM_T_I354:
   3401 	case WM_T_I210:
   3402 	case WM_T_I211:
   3403 	case WM_T_80003:
   3404 	case WM_T_ICH8:
   3405 	case WM_T_ICH9:
   3406 		for (i = 0; i < 10; i++) {
   3407 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3408 				break;
   3409 			delay(1000);
   3410 		}
   3411 		if (i == 10) {
   3412 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3413 			    "complete\n", device_xname(sc->sc_dev));
   3414 		}
   3415 		break;
   3416 	default:
   3417 		break;
   3418 	}
   3419 }
   3420 
   3421 void
   3422 wm_lan_init_done(struct wm_softc *sc)
   3423 {
   3424 	uint32_t reg = 0;
   3425 	int i;
   3426 
   3427 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3428 		device_xname(sc->sc_dev), __func__));
   3429 
   3430 	/* Wait for eeprom to reload */
   3431 	switch (sc->sc_type) {
   3432 	case WM_T_ICH10:
   3433 	case WM_T_PCH:
   3434 	case WM_T_PCH2:
   3435 	case WM_T_PCH_LPT:
   3436 	case WM_T_PCH_SPT:
   3437 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3438 			reg = CSR_READ(sc, WMREG_STATUS);
   3439 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3440 				break;
   3441 			delay(100);
   3442 		}
   3443 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3444 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3445 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3446 		}
   3447 		break;
   3448 	default:
   3449 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3450 		    __func__);
   3451 		break;
   3452 	}
   3453 
   3454 	reg &= ~STATUS_LAN_INIT_DONE;
   3455 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3456 }
   3457 
   3458 void
   3459 wm_get_cfg_done(struct wm_softc *sc)
   3460 {
   3461 	int mask;
   3462 	uint32_t reg;
   3463 	int i;
   3464 
   3465 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3466 		device_xname(sc->sc_dev), __func__));
   3467 
   3468 	/* Wait for eeprom to reload */
   3469 	switch (sc->sc_type) {
   3470 	case WM_T_82542_2_0:
   3471 	case WM_T_82542_2_1:
   3472 		/* null */
   3473 		break;
   3474 	case WM_T_82543:
   3475 	case WM_T_82544:
   3476 	case WM_T_82540:
   3477 	case WM_T_82545:
   3478 	case WM_T_82545_3:
   3479 	case WM_T_82546:
   3480 	case WM_T_82546_3:
   3481 	case WM_T_82541:
   3482 	case WM_T_82541_2:
   3483 	case WM_T_82547:
   3484 	case WM_T_82547_2:
   3485 	case WM_T_82573:
   3486 	case WM_T_82574:
   3487 	case WM_T_82583:
   3488 		/* generic */
   3489 		delay(10*1000);
   3490 		break;
   3491 	case WM_T_80003:
   3492 	case WM_T_82571:
   3493 	case WM_T_82572:
   3494 	case WM_T_82575:
   3495 	case WM_T_82576:
   3496 	case WM_T_82580:
   3497 	case WM_T_I350:
   3498 	case WM_T_I354:
   3499 	case WM_T_I210:
   3500 	case WM_T_I211:
   3501 		if (sc->sc_type == WM_T_82571) {
   3502 			/* Only 82571 shares port 0 */
   3503 			mask = EEMNGCTL_CFGDONE_0;
   3504 		} else
   3505 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3506 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3507 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3508 				break;
   3509 			delay(1000);
   3510 		}
   3511 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3512 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3513 				device_xname(sc->sc_dev), __func__));
   3514 		}
   3515 		break;
   3516 	case WM_T_ICH8:
   3517 	case WM_T_ICH9:
   3518 	case WM_T_ICH10:
   3519 	case WM_T_PCH:
   3520 	case WM_T_PCH2:
   3521 	case WM_T_PCH_LPT:
   3522 	case WM_T_PCH_SPT:
   3523 		delay(10*1000);
   3524 		if (sc->sc_type >= WM_T_ICH10)
   3525 			wm_lan_init_done(sc);
   3526 		else
   3527 			wm_get_auto_rd_done(sc);
   3528 
   3529 		reg = CSR_READ(sc, WMREG_STATUS);
   3530 		if ((reg & STATUS_PHYRA) != 0)
   3531 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3532 		break;
   3533 	default:
   3534 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3535 		    __func__);
   3536 		break;
   3537 	}
   3538 }
   3539 
   3540 /* Init hardware bits */
   3541 void
   3542 wm_initialize_hardware_bits(struct wm_softc *sc)
   3543 {
   3544 	uint32_t tarc0, tarc1, reg;
   3545 
   3546 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3547 		device_xname(sc->sc_dev), __func__));
   3548 
   3549 	/* For 82571 variant, 80003 and ICHs */
   3550 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3551 	    || (sc->sc_type >= WM_T_80003)) {
   3552 
   3553 		/* Transmit Descriptor Control 0 */
   3554 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3555 		reg |= TXDCTL_COUNT_DESC;
   3556 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3557 
   3558 		/* Transmit Descriptor Control 1 */
   3559 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3560 		reg |= TXDCTL_COUNT_DESC;
   3561 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3562 
   3563 		/* TARC0 */
   3564 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3565 		switch (sc->sc_type) {
   3566 		case WM_T_82571:
   3567 		case WM_T_82572:
   3568 		case WM_T_82573:
   3569 		case WM_T_82574:
   3570 		case WM_T_82583:
   3571 		case WM_T_80003:
   3572 			/* Clear bits 30..27 */
   3573 			tarc0 &= ~__BITS(30, 27);
   3574 			break;
   3575 		default:
   3576 			break;
   3577 		}
   3578 
   3579 		switch (sc->sc_type) {
   3580 		case WM_T_82571:
   3581 		case WM_T_82572:
   3582 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3583 
   3584 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3585 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3586 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3587 			/* 8257[12] Errata No.7 */
    3588 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3589 
   3590 			/* TARC1 bit 28 */
   3591 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3592 				tarc1 &= ~__BIT(28);
   3593 			else
   3594 				tarc1 |= __BIT(28);
   3595 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3596 
   3597 			/*
   3598 			 * 8257[12] Errata No.13
    3599 			 * Disable Dynamic Clock Gating.
   3600 			 */
   3601 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3602 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3603 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3604 			break;
   3605 		case WM_T_82573:
   3606 		case WM_T_82574:
   3607 		case WM_T_82583:
   3608 			if ((sc->sc_type == WM_T_82574)
   3609 			    || (sc->sc_type == WM_T_82583))
   3610 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3611 
   3612 			/* Extended Device Control */
   3613 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3614 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3615 			reg |= __BIT(22);	/* Set bit 22 */
   3616 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3617 
   3618 			/* Device Control */
   3619 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3620 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3621 
   3622 			/* PCIe Control Register */
   3623 			/*
   3624 			 * 82573 Errata (unknown).
   3625 			 *
   3626 			 * 82574 Errata 25 and 82583 Errata 12
   3627 			 * "Dropped Rx Packets":
    3628 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   3629 			 */
   3630 			reg = CSR_READ(sc, WMREG_GCR);
   3631 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3632 			CSR_WRITE(sc, WMREG_GCR, reg);
   3633 
   3634 			if ((sc->sc_type == WM_T_82574)
   3635 			    || (sc->sc_type == WM_T_82583)) {
   3636 				/*
   3637 				 * Document says this bit must be set for
   3638 				 * proper operation.
   3639 				 */
   3640 				reg = CSR_READ(sc, WMREG_GCR);
   3641 				reg |= __BIT(22);
   3642 				CSR_WRITE(sc, WMREG_GCR, reg);
   3643 
    3644 				/*
    3645 				 * Apply a workaround for a documented
    3646 				 * hardware erratum: error-prone or
    3647 				 * unreliable PCIe completions can occur,
    3648 				 * particularly with ASPM enabled.
    3649 				 * Without the fix, the issue can cause
    3650 				 * Tx timeouts.
    3651 				 */
   3652 				reg = CSR_READ(sc, WMREG_GCR2);
   3653 				reg |= __BIT(0);
   3654 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3655 			}
   3656 			break;
   3657 		case WM_T_80003:
   3658 			/* TARC0 */
   3659 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3660 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3661 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3662 
   3663 			/* TARC1 bit 28 */
   3664 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3665 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3666 				tarc1 &= ~__BIT(28);
   3667 			else
   3668 				tarc1 |= __BIT(28);
   3669 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3670 			break;
   3671 		case WM_T_ICH8:
   3672 		case WM_T_ICH9:
   3673 		case WM_T_ICH10:
   3674 		case WM_T_PCH:
   3675 		case WM_T_PCH2:
   3676 		case WM_T_PCH_LPT:
   3677 		case WM_T_PCH_SPT:
   3678 			/* TARC0 */
   3679 			if ((sc->sc_type == WM_T_ICH8)
   3680 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3681 				/* Set TARC0 bits 29 and 28 */
   3682 				tarc0 |= __BITS(29, 28);
   3683 			}
   3684 			/* Set TARC0 bits 23,24,26,27 */
   3685 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3686 
   3687 			/* CTRL_EXT */
   3688 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3689 			reg |= __BIT(22);	/* Set bit 22 */
   3690 			/*
   3691 			 * Enable PHY low-power state when MAC is at D3
   3692 			 * w/o WoL
   3693 			 */
   3694 			if (sc->sc_type >= WM_T_PCH)
   3695 				reg |= CTRL_EXT_PHYPDEN;
   3696 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3697 
   3698 			/* TARC1 */
   3699 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3700 			/* bit 28 */
   3701 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3702 				tarc1 &= ~__BIT(28);
   3703 			else
   3704 				tarc1 |= __BIT(28);
   3705 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3706 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3707 
   3708 			/* Device Status */
   3709 			if (sc->sc_type == WM_T_ICH8) {
   3710 				reg = CSR_READ(sc, WMREG_STATUS);
   3711 				reg &= ~__BIT(31);
   3712 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3713 
   3714 			}
   3715 
   3716 			/* IOSFPC */
   3717 			if (sc->sc_type == WM_T_PCH_SPT) {
   3718 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3719 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3720 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3721 			}
   3722 			/*
   3723 			 * Work-around descriptor data corruption issue during
   3724 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3725 			 * capability.
   3726 			 */
   3727 			reg = CSR_READ(sc, WMREG_RFCTL);
   3728 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3729 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3730 			break;
   3731 		default:
   3732 			break;
   3733 		}
   3734 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3735 
   3736 		switch (sc->sc_type) {
   3737 		/*
   3738 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3739 		 * Avoid RSS Hash Value bug.
   3740 		 */
   3741 		case WM_T_82571:
   3742 		case WM_T_82572:
   3743 		case WM_T_82573:
   3744 		case WM_T_80003:
   3745 		case WM_T_ICH8:
   3746 			reg = CSR_READ(sc, WMREG_RFCTL);
    3747 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3748 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3749 			break;
   3750 		case WM_T_82574:
    3751 			/* Use extended Rx descriptors. */
   3752 			reg = CSR_READ(sc, WMREG_RFCTL);
   3753 			reg |= WMREG_RFCTL_EXSTEN;
   3754 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3755 			break;
   3756 		default:
   3757 			break;
   3758 		}
   3759 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3760 		/*
   3761 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3762 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3763 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3764 		 * Correctly by the Device"
   3765 		 *
   3766 		 * I354(C2000) Errata AVR53:
   3767 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3768 		 * Hang"
   3769 		 */
   3770 		reg = CSR_READ(sc, WMREG_RFCTL);
   3771 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3772 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3773 	}
   3774 }
   3775 
   3776 static uint32_t
   3777 wm_rxpbs_adjust_82580(uint32_t val)
   3778 {
   3779 	uint32_t rv = 0;
   3780 
   3781 	if (val < __arraycount(wm_82580_rxpbs_table))
   3782 		rv = wm_82580_rxpbs_table[val];
   3783 
   3784 	return rv;
   3785 }
   3786 
   3787 /*
   3788  * wm_reset_phy:
   3789  *
    3790  *	Generic PHY reset function.
   3791  *	Same as e1000_phy_hw_reset_generic()
   3792  */
   3793 static void
   3794 wm_reset_phy(struct wm_softc *sc)
   3795 {
   3796 	uint32_t reg;
   3797 
   3798 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3799 		device_xname(sc->sc_dev), __func__));
   3800 	if (wm_phy_resetisblocked(sc))
   3801 		return;
   3802 
   3803 	sc->phy.acquire(sc);
   3804 
   3805 	reg = CSR_READ(sc, WMREG_CTRL);
   3806 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3807 	CSR_WRITE_FLUSH(sc);
   3808 
   3809 	delay(sc->phy.reset_delay_us);
   3810 
   3811 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3812 	CSR_WRITE_FLUSH(sc);
   3813 
   3814 	delay(150);
   3815 
   3816 	sc->phy.release(sc);
   3817 
   3818 	wm_get_cfg_done(sc);
   3819 }
   3820 
   3821 static void
   3822 wm_flush_desc_rings(struct wm_softc *sc)
   3823 {
   3824 	pcireg_t preg;
   3825 	uint32_t reg;
   3826 	int nexttx;
   3827 
   3828 	/* First, disable MULR fix in FEXTNVM11 */
   3829 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3830 	reg |= FEXTNVM11_DIS_MULRFIX;
   3831 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3832 
   3833 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3834 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3835 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3836 		struct wm_txqueue *txq;
   3837 		wiseman_txdesc_t *txd;
   3838 
   3839 		/* TX */
   3840 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3841 		    device_xname(sc->sc_dev), preg, reg);
   3842 		reg = CSR_READ(sc, WMREG_TCTL);
   3843 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3844 
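		/*
		 * Queue a single dummy 512-byte descriptor and advance the
		 * tail pointer so the hardware can drain the stuck ring.
		 */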
   3845 		txq = &sc->sc_queue[0].wmq_txq;
   3846 		nexttx = txq->txq_next;
   3847 		txd = &txq->txq_descs[nexttx];
   3848 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3849 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3850 		txd->wtx_fields.wtxu_status = 0;
   3851 		txd->wtx_fields.wtxu_options = 0;
   3852 		txd->wtx_fields.wtxu_vlan = 0;
   3853 
   3854 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3855 			BUS_SPACE_BARRIER_WRITE);
   3856 
   3857 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3858 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3859 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3860 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3861 		delay(250);
   3862 	}
   3863 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3864 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3865 		uint32_t rctl;
   3866 
   3867 		/* RX */
   3868 		printf("%s: Need RX flush (reg = %08x)\n",
   3869 		    device_xname(sc->sc_dev), preg);
   3870 		rctl = CSR_READ(sc, WMREG_RCTL);
   3871 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3872 		CSR_WRITE_FLUSH(sc);
   3873 		delay(150);
   3874 
   3875 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3876 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3877 		reg &= 0xffffc000;
   3878 		/*
   3879 		 * update thresholds: prefetch threshold to 31, host threshold
   3880 		 * to 1 and make sure the granularity is "descriptors" and not
   3881 		 * "cache lines"
   3882 		 */
   3883 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3884 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3885 
   3886 		/*
   3887 		 * momentarily enable the RX ring for the changes to take
   3888 		 * effect
   3889 		 */
   3890 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3891 		CSR_WRITE_FLUSH(sc);
   3892 		delay(150);
   3893 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3894 	}
   3895 }
   3896 
   3897 /*
   3898  * wm_reset:
   3899  *
   3900  *	Reset the i82542 chip.
   3901  */
   3902 static void
   3903 wm_reset(struct wm_softc *sc)
   3904 {
   3905 	int phy_reset = 0;
   3906 	int i, error = 0;
   3907 	uint32_t reg;
   3908 
   3909 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3910 		device_xname(sc->sc_dev), __func__));
   3911 	KASSERT(sc->sc_type != 0);
   3912 
   3913 	/*
   3914 	 * Allocate on-chip memory according to the MTU size.
   3915 	 * The Packet Buffer Allocation register must be written
   3916 	 * before the chip is reset.
   3917 	 */
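	/*
	 * Going by the 82547 FIFO setup below, the PBA value is the
	 * receive share of the on-chip packet buffer (in kilobyte
	 * units); the transmit path gets the remainder.
	 */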
   3918 	switch (sc->sc_type) {
   3919 	case WM_T_82547:
   3920 	case WM_T_82547_2:
   3921 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3922 		    PBA_22K : PBA_30K;
   3923 		for (i = 0; i < sc->sc_nqueues; i++) {
   3924 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3925 			txq->txq_fifo_head = 0;
   3926 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3927 			txq->txq_fifo_size =
   3928 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3929 			txq->txq_fifo_stall = 0;
   3930 		}
   3931 		break;
   3932 	case WM_T_82571:
   3933 	case WM_T_82572:
    3934 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3935 	case WM_T_80003:
   3936 		sc->sc_pba = PBA_32K;
   3937 		break;
   3938 	case WM_T_82573:
   3939 		sc->sc_pba = PBA_12K;
   3940 		break;
   3941 	case WM_T_82574:
   3942 	case WM_T_82583:
   3943 		sc->sc_pba = PBA_20K;
   3944 		break;
   3945 	case WM_T_82576:
   3946 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3947 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3948 		break;
   3949 	case WM_T_82580:
   3950 	case WM_T_I350:
   3951 	case WM_T_I354:
   3952 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3953 		break;
   3954 	case WM_T_I210:
   3955 	case WM_T_I211:
   3956 		sc->sc_pba = PBA_34K;
   3957 		break;
   3958 	case WM_T_ICH8:
   3959 		/* Workaround for a bit corruption issue in FIFO memory */
   3960 		sc->sc_pba = PBA_8K;
   3961 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3962 		break;
   3963 	case WM_T_ICH9:
   3964 	case WM_T_ICH10:
   3965 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3966 		    PBA_14K : PBA_10K;
   3967 		break;
   3968 	case WM_T_PCH:
   3969 	case WM_T_PCH2:
   3970 	case WM_T_PCH_LPT:
   3971 	case WM_T_PCH_SPT:
   3972 		sc->sc_pba = PBA_26K;
   3973 		break;
   3974 	default:
   3975 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3976 		    PBA_40K : PBA_48K;
   3977 		break;
   3978 	}
   3979 	/*
   3980 	 * Only old or non-multiqueue devices have the PBA register
   3981 	 * XXX Need special handling for 82575.
   3982 	 */
   3983 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3984 	    || (sc->sc_type == WM_T_82575))
   3985 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3986 
   3987 	/* Prevent the PCI-E bus from sticking */
   3988 	if (sc->sc_flags & WM_F_PCIE) {
   3989 		int timeout = 800;
   3990 
   3991 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3992 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3993 
   3994 		while (timeout--) {
   3995 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3996 			    == 0)
   3997 				break;
   3998 			delay(100);
   3999 		}
   4000 	}
   4001 
   4002 	/* Set the completion timeout for interface */
   4003 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4004 	    || (sc->sc_type == WM_T_82580)
   4005 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4006 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4007 		wm_set_pcie_completion_timeout(sc);
   4008 
   4009 	/* Clear interrupt */
   4010 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4011 	if (sc->sc_nintrs > 1) {
   4012 		if (sc->sc_type != WM_T_82574) {
   4013 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4014 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4015 		} else {
   4016 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4017 		}
   4018 	}
   4019 
   4020 	/* Stop the transmit and receive processes. */
   4021 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4022 	sc->sc_rctl &= ~RCTL_EN;
   4023 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4024 	CSR_WRITE_FLUSH(sc);
   4025 
   4026 	/* XXX set_tbi_sbp_82543() */
   4027 
   4028 	delay(10*1000);
   4029 
   4030 	/* Must acquire the MDIO ownership before MAC reset */
   4031 	switch (sc->sc_type) {
   4032 	case WM_T_82573:
   4033 	case WM_T_82574:
   4034 	case WM_T_82583:
   4035 		error = wm_get_hw_semaphore_82573(sc);
   4036 		break;
   4037 	default:
   4038 		break;
   4039 	}
   4040 
   4041 	/*
   4042 	 * 82541 Errata 29? & 82547 Errata 28?
   4043 	 * See also the description about PHY_RST bit in CTRL register
   4044 	 * in 8254x_GBe_SDM.pdf.
   4045 	 */
   4046 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4047 		CSR_WRITE(sc, WMREG_CTRL,
   4048 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4049 		CSR_WRITE_FLUSH(sc);
   4050 		delay(5000);
   4051 	}
   4052 
   4053 	switch (sc->sc_type) {
   4054 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4055 	case WM_T_82541:
   4056 	case WM_T_82541_2:
   4057 	case WM_T_82547:
   4058 	case WM_T_82547_2:
   4059 		/*
   4060 		 * On some chipsets, a reset through a memory-mapped write
   4061 		 * cycle can cause the chip to reset before completing the
   4062 		 * write cycle.  This causes major headache that can be
   4063 		 * avoided by issuing the reset via indirect register writes
   4064 		 * through I/O space.
   4065 		 *
   4066 		 * So, if we successfully mapped the I/O BAR at attach time,
   4067 		 * use that.  Otherwise, try our luck with a memory-mapped
   4068 		 * reset.
   4069 		 */
   4070 		if (sc->sc_flags & WM_F_IOH_VALID)
   4071 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4072 		else
   4073 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4074 		break;
   4075 	case WM_T_82545_3:
   4076 	case WM_T_82546_3:
   4077 		/* Use the shadow control register on these chips. */
   4078 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4079 		break;
   4080 	case WM_T_80003:
   4081 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4082 		sc->phy.acquire(sc);
   4083 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4084 		sc->phy.release(sc);
   4085 		break;
   4086 	case WM_T_ICH8:
   4087 	case WM_T_ICH9:
   4088 	case WM_T_ICH10:
   4089 	case WM_T_PCH:
   4090 	case WM_T_PCH2:
   4091 	case WM_T_PCH_LPT:
   4092 	case WM_T_PCH_SPT:
   4093 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4094 		if (wm_phy_resetisblocked(sc) == false) {
   4095 			/*
   4096 			 * Gate automatic PHY configuration by hardware on
   4097 			 * non-managed 82579
   4098 			 */
   4099 			if ((sc->sc_type == WM_T_PCH2)
   4100 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4101 				== 0))
   4102 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4103 
   4104 			reg |= CTRL_PHY_RESET;
   4105 			phy_reset = 1;
   4106 		} else
   4107 			printf("XXX reset is blocked!!!\n");
   4108 		sc->phy.acquire(sc);
   4109 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4110 		/* Don't insert a completion barrier during reset */
   4111 		delay(20*1000);
   4112 		mutex_exit(sc->sc_ich_phymtx);
   4113 		break;
   4114 	case WM_T_82580:
   4115 	case WM_T_I350:
   4116 	case WM_T_I354:
   4117 	case WM_T_I210:
   4118 	case WM_T_I211:
   4119 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4120 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4121 			CSR_WRITE_FLUSH(sc);
   4122 		delay(5000);
   4123 		break;
   4124 	case WM_T_82542_2_0:
   4125 	case WM_T_82542_2_1:
   4126 	case WM_T_82543:
   4127 	case WM_T_82540:
   4128 	case WM_T_82545:
   4129 	case WM_T_82546:
   4130 	case WM_T_82571:
   4131 	case WM_T_82572:
   4132 	case WM_T_82573:
   4133 	case WM_T_82574:
   4134 	case WM_T_82575:
   4135 	case WM_T_82576:
   4136 	case WM_T_82583:
   4137 	default:
   4138 		/* Everything else can safely use the documented method. */
   4139 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4140 		break;
   4141 	}
   4142 
   4143 	/* Must release the MDIO ownership after MAC reset */
   4144 	switch (sc->sc_type) {
   4145 	case WM_T_82573:
   4146 	case WM_T_82574:
   4147 	case WM_T_82583:
   4148 		if (error == 0)
   4149 			wm_put_hw_semaphore_82573(sc);
   4150 		break;
   4151 	default:
   4152 		break;
   4153 	}
   4154 
   4155 	if (phy_reset != 0)
   4156 		wm_get_cfg_done(sc);
   4157 
   4158 	/* reload EEPROM */
   4159 	switch (sc->sc_type) {
   4160 	case WM_T_82542_2_0:
   4161 	case WM_T_82542_2_1:
   4162 	case WM_T_82543:
   4163 	case WM_T_82544:
   4164 		delay(10);
   4165 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4166 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4167 		CSR_WRITE_FLUSH(sc);
   4168 		delay(2000);
   4169 		break;
   4170 	case WM_T_82540:
   4171 	case WM_T_82545:
   4172 	case WM_T_82545_3:
   4173 	case WM_T_82546:
   4174 	case WM_T_82546_3:
   4175 		delay(5*1000);
   4176 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4177 		break;
   4178 	case WM_T_82541:
   4179 	case WM_T_82541_2:
   4180 	case WM_T_82547:
   4181 	case WM_T_82547_2:
   4182 		delay(20000);
   4183 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4184 		break;
   4185 	case WM_T_82571:
   4186 	case WM_T_82572:
   4187 	case WM_T_82573:
   4188 	case WM_T_82574:
   4189 	case WM_T_82583:
   4190 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4191 			delay(10);
   4192 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4193 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4194 			CSR_WRITE_FLUSH(sc);
   4195 		}
   4196 		/* check EECD_EE_AUTORD */
   4197 		wm_get_auto_rd_done(sc);
   4198 		/*
   4199 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4200 		 * is set.
   4201 		 */
   4202 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4203 		    || (sc->sc_type == WM_T_82583))
   4204 			delay(25*1000);
   4205 		break;
   4206 	case WM_T_82575:
   4207 	case WM_T_82576:
   4208 	case WM_T_82580:
   4209 	case WM_T_I350:
   4210 	case WM_T_I354:
   4211 	case WM_T_I210:
   4212 	case WM_T_I211:
   4213 	case WM_T_80003:
   4214 		/* check EECD_EE_AUTORD */
   4215 		wm_get_auto_rd_done(sc);
   4216 		break;
   4217 	case WM_T_ICH8:
   4218 	case WM_T_ICH9:
   4219 	case WM_T_ICH10:
   4220 	case WM_T_PCH:
   4221 	case WM_T_PCH2:
   4222 	case WM_T_PCH_LPT:
   4223 	case WM_T_PCH_SPT:
   4224 		break;
   4225 	default:
   4226 		panic("%s: unknown type\n", __func__);
   4227 	}
   4228 
   4229 	/* Check whether EEPROM is present or not */
   4230 	switch (sc->sc_type) {
   4231 	case WM_T_82575:
   4232 	case WM_T_82576:
   4233 	case WM_T_82580:
   4234 	case WM_T_I350:
   4235 	case WM_T_I354:
   4236 	case WM_T_ICH8:
   4237 	case WM_T_ICH9:
   4238 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4239 			/* Not found */
   4240 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4241 			if (sc->sc_type == WM_T_82575)
   4242 				wm_reset_init_script_82575(sc);
   4243 		}
   4244 		break;
   4245 	default:
   4246 		break;
   4247 	}
   4248 
   4249 	if ((sc->sc_type == WM_T_82580)
   4250 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4251 		/* clear global device reset status bit */
   4252 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4253 	}
   4254 
   4255 	/* Clear any pending interrupt events. */
   4256 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4257 	reg = CSR_READ(sc, WMREG_ICR);
   4258 	if (sc->sc_nintrs > 1) {
   4259 		if (sc->sc_type != WM_T_82574) {
   4260 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4261 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4262 		} else
   4263 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4264 	}
   4265 
   4266 	/* reload sc_ctrl */
   4267 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4268 
   4269 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4270 		wm_set_eee_i350(sc);
   4271 
   4272 	/* Clear the host wakeup bit after lcd reset */
   4273 	if (sc->sc_type >= WM_T_PCH) {
   4274 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4275 		    BM_PORT_GEN_CFG);
   4276 		reg &= ~BM_WUC_HOST_WU_BIT;
   4277 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4278 		    BM_PORT_GEN_CFG, reg);
   4279 	}
   4280 
   4281 	/*
   4282 	 * For PCH, this write will make sure that any noise will be detected
   4283 	 * as a CRC error and be dropped rather than show up as a bad packet
   4284 	 * to the DMA engine
   4285 	 */
   4286 	if (sc->sc_type == WM_T_PCH)
   4287 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4288 
   4289 	if (sc->sc_type >= WM_T_82544)
   4290 		CSR_WRITE(sc, WMREG_WUC, 0);
   4291 
   4292 	wm_reset_mdicnfg_82580(sc);
   4293 
   4294 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4295 		wm_pll_workaround_i210(sc);
   4296 }
   4297 
   4298 /*
   4299  * wm_add_rxbuf:
   4300  *
    4301  *	Add a receive buffer to the indicated descriptor.
   4302  */
   4303 static int
   4304 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4305 {
   4306 	struct wm_softc *sc = rxq->rxq_sc;
   4307 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4308 	struct mbuf *m;
   4309 	int error;
   4310 
   4311 	KASSERT(mutex_owned(rxq->rxq_lock));
   4312 
   4313 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4314 	if (m == NULL)
   4315 		return ENOBUFS;
   4316 
   4317 	MCLGET(m, M_DONTWAIT);
   4318 	if ((m->m_flags & M_EXT) == 0) {
   4319 		m_freem(m);
   4320 		return ENOBUFS;
   4321 	}
   4322 
   4323 	if (rxs->rxs_mbuf != NULL)
   4324 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4325 
   4326 	rxs->rxs_mbuf = m;
   4327 
   4328 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4329 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4330 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4331 	if (error) {
   4332 		/* XXX XXX XXX */
   4333 		aprint_error_dev(sc->sc_dev,
   4334 		    "unable to load rx DMA map %d, error = %d\n",
   4335 		    idx, error);
   4336 		panic("wm_add_rxbuf");
   4337 	}
   4338 
   4339 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4340 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4341 
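	/*
	 * On NEWQUEUE devices, initialize the descriptor here only while
	 * the receiver is enabled; when it is disabled, descriptor setup
	 * is presumably left to the init path.
	 */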
   4342 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4343 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4344 			wm_init_rxdesc(rxq, idx);
   4345 	} else
   4346 		wm_init_rxdesc(rxq, idx);
   4347 
   4348 	return 0;
   4349 }
   4350 
   4351 /*
   4352  * wm_rxdrain:
   4353  *
   4354  *	Drain the receive queue.
   4355  */
   4356 static void
   4357 wm_rxdrain(struct wm_rxqueue *rxq)
   4358 {
   4359 	struct wm_softc *sc = rxq->rxq_sc;
   4360 	struct wm_rxsoft *rxs;
   4361 	int i;
   4362 
   4363 	KASSERT(mutex_owned(rxq->rxq_lock));
   4364 
   4365 	for (i = 0; i < WM_NRXDESC; i++) {
   4366 		rxs = &rxq->rxq_soft[i];
   4367 		if (rxs->rxs_mbuf != NULL) {
   4368 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4369 			m_freem(rxs->rxs_mbuf);
   4370 			rxs->rxs_mbuf = NULL;
   4371 		}
   4372 	}
   4373 }
   4374 
   4375 
   4376 /*
   4377  * XXX copy from FreeBSD's sys/net/rss_config.c
   4378  */
   4379 /*
   4380  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4381  * effectiveness may be limited by algorithm choice and available entropy
   4382  * during the boot.
   4383  *
   4384  * XXXRW: And that we don't randomize it yet!
   4385  *
   4386  * This is the default Microsoft RSS specification key which is also
   4387  * the Chelsio T5 firmware default key.
   4388  */
   4389 #define RSS_KEYSIZE 40
   4390 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4391 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4392 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4393 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4394 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4395 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4396 };
   4397 
    4398 /*
    4399  * The caller must pass an array of size sizeof(wm_rss_key).
    4400  *
    4401  * XXX
    4402  * As if_ixgbe may use this function, it should not be an
    4403  * if_wm-specific function.
    4404  */
   4405 static void
   4406 wm_rss_getkey(uint8_t *key)
   4407 {
   4408 
   4409 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4410 }
   4411 
   4412 /*
   4413  * Setup registers for RSS.
   4414  *
    4415  * XXX VMDq is not supported yet.
   4416  */
   4417 static void
   4418 wm_init_rss(struct wm_softc *sc)
   4419 {
   4420 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4421 	int i;
   4422 
   4423 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4424 
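	/*
	 * Spread the queues over the redirection table round-robin:
	 * RETA entry i steers its bucket to queue (i % sc_nqueues).
	 */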
   4425 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4426 		int qid, reta_ent;
   4427 
   4428 		qid  = i % sc->sc_nqueues;
    4429 		switch (sc->sc_type) {
   4430 		case WM_T_82574:
   4431 			reta_ent = __SHIFTIN(qid,
   4432 			    RETA_ENT_QINDEX_MASK_82574);
   4433 			break;
   4434 		case WM_T_82575:
   4435 			reta_ent = __SHIFTIN(qid,
   4436 			    RETA_ENT_QINDEX1_MASK_82575);
   4437 			break;
   4438 		default:
   4439 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4440 			break;
   4441 		}
   4442 
   4443 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4444 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4445 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4446 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4447 	}
   4448 
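	/* Load the 40-byte key into the RSSRK registers, 32 bits at a time. */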
   4449 	wm_rss_getkey((uint8_t *)rss_key);
   4450 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4451 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4452 
   4453 	if (sc->sc_type == WM_T_82574)
   4454 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4455 	else
   4456 		mrqc = MRQC_ENABLE_RSS_MQ;
   4457 
   4458 	/*
   4459 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4460 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4461 	 */
   4462 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4463 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4464 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4465 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4466 
   4467 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4468 }
   4469 
    4470 /*
    4471  * Adjust the number of TX and RX queues the system actually uses.
    4472  *
    4473  * The result depends on the following parameters:
    4474  *     - The number of hardware queues
    4475  *     - The number of MSI-X vectors (= "nvectors" argument)
    4476  *     - ncpu
    4477  */
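/*
 * Worked example (hypothetical numbers): an 82576 exposes 16 hardware
 * queue pairs; with 5 MSI-X vectors on an 8-CPU machine the code below
 * uses min(16, 5 - 1, 8) = 4 queues, keeping one vector for the link
 * interrupt.
 */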
   4478 static void
   4479 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4480 {
   4481 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4482 
   4483 	if (nvectors < 2) {
   4484 		sc->sc_nqueues = 1;
   4485 		return;
   4486 	}
   4487 
    4488 	switch (sc->sc_type) {
   4489 	case WM_T_82572:
   4490 		hw_ntxqueues = 2;
   4491 		hw_nrxqueues = 2;
   4492 		break;
   4493 	case WM_T_82574:
   4494 		hw_ntxqueues = 2;
   4495 		hw_nrxqueues = 2;
   4496 		break;
   4497 	case WM_T_82575:
   4498 		hw_ntxqueues = 4;
   4499 		hw_nrxqueues = 4;
   4500 		break;
   4501 	case WM_T_82576:
   4502 		hw_ntxqueues = 16;
   4503 		hw_nrxqueues = 16;
   4504 		break;
   4505 	case WM_T_82580:
   4506 	case WM_T_I350:
   4507 	case WM_T_I354:
   4508 		hw_ntxqueues = 8;
   4509 		hw_nrxqueues = 8;
   4510 		break;
   4511 	case WM_T_I210:
   4512 		hw_ntxqueues = 4;
   4513 		hw_nrxqueues = 4;
   4514 		break;
   4515 	case WM_T_I211:
   4516 		hw_ntxqueues = 2;
   4517 		hw_nrxqueues = 2;
   4518 		break;
    4519 		/*
    4520 		 * As the following Ethernet controllers do not support
    4521 		 * MSI-X, this driver does not use multiqueue on them.
    4522 		 *     - WM_T_80003
    4523 		 *     - WM_T_ICH8
    4524 		 *     - WM_T_ICH9
    4525 		 *     - WM_T_ICH10
    4526 		 *     - WM_T_PCH
    4527 		 *     - WM_T_PCH2
    4528 		 *     - WM_T_PCH_LPT
    4529 		 */
   4530 	default:
   4531 		hw_ntxqueues = 1;
   4532 		hw_nrxqueues = 1;
   4533 		break;
   4534 	}
   4535 
   4536 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4537 
    4538 	/*
    4539 	 * Since more queues than MSI-X vectors cannot improve scaling,
    4540 	 * limit the number of queues actually used.
    4541 	 */
   4542 	if (nvectors < hw_nqueues + 1) {
   4543 		sc->sc_nqueues = nvectors - 1;
   4544 	} else {
   4545 		sc->sc_nqueues = hw_nqueues;
   4546 	}
   4547 
    4548 	/*
    4549 	 * Since more queues than CPUs cannot improve scaling, limit
    4550 	 * the number of queues actually used.
    4551 	 */
   4552 	if (ncpu < sc->sc_nqueues)
   4553 		sc->sc_nqueues = ncpu;
   4554 }
   4555 
   4556 /*
    4557  * Both single-interrupt MSI and INTx can use this function.
   4558  */
   4559 static int
   4560 wm_setup_legacy(struct wm_softc *sc)
   4561 {
   4562 	pci_chipset_tag_t pc = sc->sc_pc;
   4563 	const char *intrstr = NULL;
   4564 	char intrbuf[PCI_INTRSTR_LEN];
   4565 	int error;
   4566 
   4567 	error = wm_alloc_txrx_queues(sc);
   4568 	if (error) {
   4569 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4570 		    error);
   4571 		return ENOMEM;
   4572 	}
   4573 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4574 	    sizeof(intrbuf));
   4575 #ifdef WM_MPSAFE
   4576 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4577 #endif
   4578 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4579 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4580 	if (sc->sc_ihs[0] == NULL) {
   4581 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4582 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4583 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4584 		return ENOMEM;
   4585 	}
   4586 
   4587 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4588 	sc->sc_nintrs = 1;
   4589 	return 0;
   4590 }
   4591 
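/*
 * Set up MSI-X: one TX/RX vector per queue, distributed round-robin
 * over the CPUs, plus one vector for link status changes.
 */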
   4592 static int
   4593 wm_setup_msix(struct wm_softc *sc)
   4594 {
   4595 	void *vih;
   4596 	kcpuset_t *affinity;
   4597 	int qidx, error, intr_idx, txrx_established;
   4598 	pci_chipset_tag_t pc = sc->sc_pc;
   4599 	const char *intrstr = NULL;
   4600 	char intrbuf[PCI_INTRSTR_LEN];
   4601 	char intr_xname[INTRDEVNAMEBUF];
   4602 
   4603 	if (sc->sc_nqueues < ncpu) {
   4604 		/*
    4605 		 * To avoid contending with other devices' interrupts, the
    4606 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
   4607 		 */
   4608 		sc->sc_affinity_offset = 1;
   4609 	} else {
   4610 		/*
    4611 		 * In this case the device uses all CPUs, so for readability
    4612 		 * we match the affinitized cpu_index to the MSI-X vector number.
   4613 		 */
   4614 		sc->sc_affinity_offset = 0;
   4615 	}
   4616 
   4617 	error = wm_alloc_txrx_queues(sc);
   4618 	if (error) {
   4619 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4620 		    error);
   4621 		return ENOMEM;
   4622 	}
   4623 
   4624 	kcpuset_create(&affinity, false);
   4625 	intr_idx = 0;
   4626 
   4627 	/*
   4628 	 * TX and RX
   4629 	 */
   4630 	txrx_established = 0;
   4631 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4632 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4633 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4634 
   4635 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4636 		    sizeof(intrbuf));
   4637 #ifdef WM_MPSAFE
   4638 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4639 		    PCI_INTR_MPSAFE, true);
   4640 #endif
   4641 		memset(intr_xname, 0, sizeof(intr_xname));
   4642 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4643 		    device_xname(sc->sc_dev), qidx);
   4644 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4645 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4646 		if (vih == NULL) {
   4647 			aprint_error_dev(sc->sc_dev,
   4648 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4649 			    intrstr ? " at " : "",
   4650 			    intrstr ? intrstr : "");
   4651 
   4652 			goto fail;
   4653 		}
   4654 		kcpuset_zero(affinity);
   4655 		/* Round-robin affinity */
   4656 		kcpuset_set(affinity, affinity_to);
   4657 		error = interrupt_distribute(vih, affinity, NULL);
   4658 		if (error == 0) {
   4659 			aprint_normal_dev(sc->sc_dev,
   4660 			    "for TX and RX interrupting at %s affinity to %u\n",
   4661 			    intrstr, affinity_to);
   4662 		} else {
   4663 			aprint_normal_dev(sc->sc_dev,
   4664 			    "for TX and RX interrupting at %s\n", intrstr);
   4665 		}
   4666 		sc->sc_ihs[intr_idx] = vih;
    4667 		wmq->wmq_id = qidx;
   4668 		wmq->wmq_intr_idx = intr_idx;
   4669 
   4670 		txrx_established++;
   4671 		intr_idx++;
   4672 	}
   4673 
   4674 	/*
   4675 	 * LINK
   4676 	 */
   4677 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4678 	    sizeof(intrbuf));
   4679 #ifdef WM_MPSAFE
   4680 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4681 #endif
   4682 	memset(intr_xname, 0, sizeof(intr_xname));
   4683 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4684 	    device_xname(sc->sc_dev));
   4685 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4686 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4687 	if (vih == NULL) {
   4688 		aprint_error_dev(sc->sc_dev,
   4689 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4690 		    intrstr ? " at " : "",
   4691 		    intrstr ? intrstr : "");
   4692 
   4693 		goto fail;
   4694 	}
    4695 	/* Keep the default affinity for the LINK interrupt. */
   4696 	aprint_normal_dev(sc->sc_dev,
   4697 	    "for LINK interrupting at %s\n", intrstr);
   4698 	sc->sc_ihs[intr_idx] = vih;
   4699 	sc->sc_link_intr_idx = intr_idx;
   4700 
   4701 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4702 	kcpuset_destroy(affinity);
   4703 	return 0;
   4704 
   4705  fail:
   4706 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4707 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    4708 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4709 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4710 	}
   4711 
   4712 	kcpuset_destroy(affinity);
   4713 	return ENOMEM;
   4714 }
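/*
 * To recap the vector layout established above: MSI-X vectors
 * 0 .. sc_nqueues - 1 each service one Tx/Rx queue pair through
 * wm_txrxintr_msix(), and vector sc_nqueues (sc_link_intr_idx)
 * services link state changes through wm_linkintr_msix().
 */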
   4715 
   4716 static void
   4717 wm_turnon(struct wm_softc *sc)
   4718 {
   4719 	int i;
   4720 
   4721 	KASSERT(WM_CORE_LOCKED(sc));
   4722 
    4723 	/*
    4724 	 * Must unset the stopping flags in ascending order.
    4725 	 */
    4726 	for (i = 0; i < sc->sc_nqueues; i++) {
   4727 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4728 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4729 
   4730 		mutex_enter(txq->txq_lock);
   4731 		txq->txq_stopping = false;
   4732 		mutex_exit(txq->txq_lock);
   4733 
   4734 		mutex_enter(rxq->rxq_lock);
   4735 		rxq->rxq_stopping = false;
   4736 		mutex_exit(rxq->rxq_lock);
   4737 	}
   4738 
   4739 	sc->sc_core_stopping = false;
   4740 }
   4741 
   4742 static void
   4743 wm_turnoff(struct wm_softc *sc)
   4744 {
   4745 	int i;
   4746 
   4747 	KASSERT(WM_CORE_LOCKED(sc));
   4748 
   4749 	sc->sc_core_stopping = true;
   4750 
    4751 	/*
    4752 	 * Must set the stopping flags in ascending order.
    4753 	 */
    4754 	for (i = 0; i < sc->sc_nqueues; i++) {
   4755 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4756 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4757 
   4758 		mutex_enter(rxq->rxq_lock);
   4759 		rxq->rxq_stopping = true;
   4760 		mutex_exit(rxq->rxq_lock);
   4761 
   4762 		mutex_enter(txq->txq_lock);
   4763 		txq->txq_stopping = true;
   4764 		mutex_exit(txq->txq_lock);
   4765 	}
   4766 }
   4767 
   4768 /*
   4769  * wm_init:		[ifnet interface function]
   4770  *
   4771  *	Initialize the interface.
   4772  */
   4773 static int
   4774 wm_init(struct ifnet *ifp)
   4775 {
   4776 	struct wm_softc *sc = ifp->if_softc;
   4777 	int ret;
   4778 
   4779 	WM_CORE_LOCK(sc);
   4780 	ret = wm_init_locked(ifp);
   4781 	WM_CORE_UNLOCK(sc);
   4782 
   4783 	return ret;
   4784 }
   4785 
   4786 static int
   4787 wm_init_locked(struct ifnet *ifp)
   4788 {
   4789 	struct wm_softc *sc = ifp->if_softc;
   4790 	int i, j, trynum, error = 0;
   4791 	uint32_t reg;
   4792 
   4793 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4794 		device_xname(sc->sc_dev), __func__));
   4795 	KASSERT(WM_CORE_LOCKED(sc));
   4796 
   4797 	/*
    4798 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4799 	 * There is a small but measurable benefit to avoiding the adjustment
   4800 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4801 	 * on such platforms.  One possibility is that the DMA itself is
   4802 	 * slightly more efficient if the front of the entire packet (instead
   4803 	 * of the front of the headers) is aligned.
   4804 	 *
   4805 	 * Note we must always set align_tweak to 0 if we are using
   4806 	 * jumbo frames.
   4807 	 */
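	/*
	 * For illustration: with align_tweak = 2 the receive buffer data
	 * starts at offset 2, so the 14-byte Ethernet header ends at
	 * offset 16 and the IP header that follows it is 4-byte aligned.
	 */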
   4808 #ifdef __NO_STRICT_ALIGNMENT
   4809 	sc->sc_align_tweak = 0;
   4810 #else
   4811 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4812 		sc->sc_align_tweak = 0;
   4813 	else
   4814 		sc->sc_align_tweak = 2;
   4815 #endif /* __NO_STRICT_ALIGNMENT */
   4816 
   4817 	/* Cancel any pending I/O. */
   4818 	wm_stop_locked(ifp, 0);
   4819 
   4820 	/* update statistics before reset */
   4821 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4822 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4823 
   4824 	/* PCH_SPT hardware workaround */
   4825 	if (sc->sc_type == WM_T_PCH_SPT)
   4826 		wm_flush_desc_rings(sc);
   4827 
   4828 	/* Reset the chip to a known state. */
   4829 	wm_reset(sc);
   4830 
   4831 	/* AMT based hardware can now take control from firmware */
   4832 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4833 		wm_get_hw_control(sc);
   4834 
   4835 	/* Init hardware bits */
   4836 	wm_initialize_hardware_bits(sc);
   4837 
   4838 	/* Reset the PHY. */
   4839 	if (sc->sc_flags & WM_F_HAS_MII)
   4840 		wm_gmii_reset(sc);
   4841 
   4842 	/* Calculate (E)ITR value */
   4843 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4844 		sc->sc_itr = 450;	/* For EITR */
   4845 	} else if (sc->sc_type >= WM_T_82543) {
   4846 		/*
   4847 		 * Set up the interrupt throttling register (units of 256ns)
   4848 		 * Note that a footnote in Intel's documentation says this
   4849 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    4850 	 * or 10Mbit mode.  Empirically, this appears to be true also
    4851 	 * for the 1024ns units of the other
   4852 		 * interrupt-related timer registers -- so, really, we ought
   4853 		 * to divide this value by 4 when the link speed is low.
   4854 		 *
   4855 		 * XXX implement this division at link speed change!
   4856 		 */
   4857 
   4858 		/*
   4859 		 * For N interrupts/sec, set this value to:
   4860 		 * 1000000000 / (N * 256).  Note that we set the
   4861 		 * absolute and packet timer values to this value
   4862 		 * divided by 4 to get "simple timer" behavior.
   4863 		 */
   4864 
   4865 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4866 	}
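	/*
	 * Worked example of the formula above: with sc_itr = 1500 and
	 * 256ns units, the throttling interval is 1500 * 256ns = 384us,
	 * i.e. 1000000000 / (1500 * 256) ~= 2604 interrupts/sec.
	 */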
   4867 
   4868 	error = wm_init_txrx_queues(sc);
   4869 	if (error)
   4870 		goto out;
   4871 
   4872 	/*
   4873 	 * Clear out the VLAN table -- we don't use it (yet).
   4874 	 */
   4875 	CSR_WRITE(sc, WMREG_VET, 0);
   4876 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4877 		trynum = 10; /* Due to hw errata */
   4878 	else
   4879 		trynum = 1;
   4880 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4881 		for (j = 0; j < trynum; j++)
   4882 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4883 
   4884 	/*
   4885 	 * Set up flow-control parameters.
   4886 	 *
   4887 	 * XXX Values could probably stand some tuning.
   4888 	 */
   4889 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4890 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4891 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4892 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4893 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4894 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4895 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4896 	}
   4897 
   4898 	sc->sc_fcrtl = FCRTL_DFLT;
   4899 	if (sc->sc_type < WM_T_82543) {
   4900 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4901 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4902 	} else {
   4903 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4904 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4905 	}
   4906 
   4907 	if (sc->sc_type == WM_T_80003)
   4908 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4909 	else
   4910 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4911 
   4912 	/* Writes the control register. */
   4913 	wm_set_vlan(sc);
   4914 
   4915 	if (sc->sc_flags & WM_F_HAS_MII) {
   4916 		int val;
   4917 
   4918 		switch (sc->sc_type) {
   4919 		case WM_T_80003:
   4920 		case WM_T_ICH8:
   4921 		case WM_T_ICH9:
   4922 		case WM_T_ICH10:
   4923 		case WM_T_PCH:
   4924 		case WM_T_PCH2:
   4925 		case WM_T_PCH_LPT:
   4926 		case WM_T_PCH_SPT:
   4927 			/*
   4928 			 * Set the mac to wait the maximum time between each
   4929 			 * iteration and increase the max iterations when
   4930 			 * polling the phy; this fixes erroneous timeouts at
   4931 			 * 10Mbps.
   4932 			 */
   4933 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4934 			    0xFFFF);
   4935 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4936 			val |= 0x3F;
   4937 			wm_kmrn_writereg(sc,
   4938 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4939 			break;
   4940 		default:
   4941 			break;
   4942 		}
   4943 
   4944 		if (sc->sc_type == WM_T_80003) {
   4945 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4946 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4947 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4948 
   4949 			/* Bypass RX and TX FIFO's */
   4950 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4951 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4952 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4953 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4954 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4955 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4956 		}
   4957 	}
   4958 #if 0
   4959 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4960 #endif
   4961 
   4962 	/* Set up checksum offload parameters. */
   4963 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4964 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4965 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4966 		reg |= RXCSUM_IPOFL;
   4967 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4968 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4969 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4970 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4971 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4972 
   4973 	/* Set up MSI-X */
   4974 	if (sc->sc_nintrs > 1) {
   4975 		uint32_t ivar;
   4976 		struct wm_queue *wmq;
   4977 		int qid, qintr_idx;
   4978 
   4979 		if (sc->sc_type == WM_T_82575) {
   4980 			/* Interrupt control */
   4981 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4982 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4983 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4984 
   4985 			/* TX and RX */
   4986 			for (i = 0; i < sc->sc_nqueues; i++) {
   4987 				wmq = &sc->sc_queue[i];
   4988 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4989 				    EITR_TX_QUEUE(wmq->wmq_id)
   4990 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4991 			}
   4992 			/* Link status */
   4993 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4994 			    EITR_OTHER);
   4995 		} else if (sc->sc_type == WM_T_82574) {
   4996 			/* Interrupt control */
   4997 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4998 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4999 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5000 
   5001 			ivar = 0;
   5002 			/* TX and RX */
   5003 			for (i = 0; i < sc->sc_nqueues; i++) {
   5004 				wmq = &sc->sc_queue[i];
   5005 				qid = wmq->wmq_id;
   5006 				qintr_idx = wmq->wmq_intr_idx;
   5007 
   5008 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5009 				    IVAR_TX_MASK_Q_82574(qid));
   5010 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5011 				    IVAR_RX_MASK_Q_82574(qid));
   5012 			}
   5013 			/* Link status */
   5014 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5015 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5016 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5017 		} else {
   5018 			/* Interrupt control */
   5019 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5020 			    | GPIE_EIAME | GPIE_PBA);
   5021 
   5022 			switch (sc->sc_type) {
   5023 			case WM_T_82580:
   5024 			case WM_T_I350:
   5025 			case WM_T_I354:
   5026 			case WM_T_I210:
   5027 			case WM_T_I211:
   5028 				/* TX and RX */
   5029 				for (i = 0; i < sc->sc_nqueues; i++) {
   5030 					wmq = &sc->sc_queue[i];
   5031 					qid = wmq->wmq_id;
   5032 					qintr_idx = wmq->wmq_intr_idx;
   5033 
   5034 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5035 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5036 					ivar |= __SHIFTIN((qintr_idx
   5037 						| IVAR_VALID),
   5038 					    IVAR_TX_MASK_Q(qid));
   5039 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5040 					ivar |= __SHIFTIN((qintr_idx
   5041 						| IVAR_VALID),
   5042 					    IVAR_RX_MASK_Q(qid));
   5043 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5044 				}
   5045 				break;
   5046 			case WM_T_82576:
   5047 				/* TX and RX */
   5048 				for (i = 0; i < sc->sc_nqueues; i++) {
   5049 					wmq = &sc->sc_queue[i];
   5050 					qid = wmq->wmq_id;
   5051 					qintr_idx = wmq->wmq_intr_idx;
   5052 
   5053 					ivar = CSR_READ(sc,
   5054 					    WMREG_IVAR_Q_82576(qid));
   5055 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5056 					ivar |= __SHIFTIN((qintr_idx
   5057 						| IVAR_VALID),
   5058 					    IVAR_TX_MASK_Q_82576(qid));
   5059 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5060 					ivar |= __SHIFTIN((qintr_idx
   5061 						| IVAR_VALID),
   5062 					    IVAR_RX_MASK_Q_82576(qid));
   5063 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5064 					    ivar);
   5065 				}
   5066 				break;
   5067 			default:
   5068 				break;
   5069 			}
   5070 
   5071 			/* Link status */
   5072 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5073 			    IVAR_MISC_OTHER);
   5074 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5075 		}
   5076 
   5077 		if (sc->sc_nqueues > 1) {
   5078 			wm_init_rss(sc);
   5079 
    5080 			/*
    5081 			 * NOTE: Receive full-packet checksum offload
    5082 			 * is mutually exclusive with multiqueue; however,
    5083 			 * this is not the same as the TCP/IP checksum
    5084 			 * offloads, which still work.
    5085 			 */
   5086 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5087 			reg |= RXCSUM_PCSD;
   5088 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5089 		}
   5090 	}
   5091 
   5092 	/* Set up the interrupt registers. */
   5093 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5094 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5095 	    ICR_RXO | ICR_RXT0;
   5096 	if (sc->sc_nintrs > 1) {
   5097 		uint32_t mask;
   5098 		struct wm_queue *wmq;
   5099 
   5100 		switch (sc->sc_type) {
   5101 		case WM_T_82574:
   5102 			CSR_WRITE(sc, WMREG_EIAC_82574,
   5103 			    WMREG_EIAC_82574_MSIX_MASK);
   5104 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   5105 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5106 			break;
   5107 		default:
   5108 			if (sc->sc_type == WM_T_82575) {
   5109 				mask = 0;
   5110 				for (i = 0; i < sc->sc_nqueues; i++) {
   5111 					wmq = &sc->sc_queue[i];
   5112 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5113 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5114 				}
   5115 				mask |= EITR_OTHER;
   5116 			} else {
   5117 				mask = 0;
   5118 				for (i = 0; i < sc->sc_nqueues; i++) {
   5119 					wmq = &sc->sc_queue[i];
   5120 					mask |= 1 << wmq->wmq_intr_idx;
   5121 				}
   5122 				mask |= 1 << sc->sc_link_intr_idx;
   5123 			}
   5124 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5125 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5126 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5127 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5128 			break;
   5129 		}
   5130 	} else
   5131 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5132 
   5133 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5134 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5135 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5136 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5137 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5138 		reg |= KABGTXD_BGSQLBIAS;
   5139 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5140 	}
   5141 
   5142 	/* Set up the inter-packet gap. */
   5143 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5144 
   5145 	if (sc->sc_type >= WM_T_82543) {
   5146 		/*
    5147 		 * XXX The 82574 has both ITR and EITR.  Set EITR when we
    5148 		 * use the multiqueue function with MSI-X.
   5149 		 */
   5150 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5151 			int qidx;
   5152 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5153 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5154 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5155 				    sc->sc_itr);
   5156 			}
   5157 			/*
    5158 			 * Link interrupts occur much less frequently than
    5159 			 * TX and RX interrupts, so we don't tune the
    5160 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5161 			 * FreeBSD's if_igb does.
   5162 			 */
   5163 		} else
   5164 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5165 	}
   5166 
   5167 	/* Set the VLAN ethernetype. */
   5168 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5169 
   5170 	/*
   5171 	 * Set up the transmit control register; we start out with
    5172 	 * a collision distance suitable for FDX, but update it when
   5173 	 * we resolve the media type.
   5174 	 */
   5175 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5176 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5177 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5178 	if (sc->sc_type >= WM_T_82571)
   5179 		sc->sc_tctl |= TCTL_MULR;
   5180 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5181 
   5182 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5183 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   5184 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5185 	}
   5186 
   5187 	if (sc->sc_type == WM_T_80003) {
   5188 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5189 		reg &= ~TCTL_EXT_GCEX_MASK;
   5190 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5191 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5192 	}
   5193 
   5194 	/* Set the media. */
   5195 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5196 		goto out;
   5197 
   5198 	/* Configure for OS presence */
   5199 	wm_init_manageability(sc);
   5200 
   5201 	/*
   5202 	 * Set up the receive control register; we actually program
   5203 	 * the register when we set the receive filter.  Use multicast
   5204 	 * address offset type 0.
   5205 	 *
   5206 	 * Only the i82544 has the ability to strip the incoming
   5207 	 * CRC, so we don't enable that feature.
   5208 	 */
   5209 	sc->sc_mchash_type = 0;
   5210 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5211 	    | RCTL_MO(sc->sc_mchash_type);
   5212 
   5213 	/*
    5214 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5215 	 */
   5216 	if (sc->sc_type == WM_T_82574)
   5217 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5218 
   5219 	/*
    5220 	 * The I350 has a bug where it always strips the CRC whether
    5221 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   5222 	 */
   5223 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5224 	    || (sc->sc_type == WM_T_I210))
   5225 		sc->sc_rctl |= RCTL_SECRC;
   5226 
   5227 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5228 	    && (ifp->if_mtu > ETHERMTU)) {
   5229 		sc->sc_rctl |= RCTL_LPE;
   5230 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5231 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5232 	}
   5233 
   5234 	if (MCLBYTES == 2048) {
   5235 		sc->sc_rctl |= RCTL_2k;
   5236 	} else {
   5237 		if (sc->sc_type >= WM_T_82543) {
   5238 			switch (MCLBYTES) {
   5239 			case 4096:
   5240 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5241 				break;
   5242 			case 8192:
   5243 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5244 				break;
   5245 			case 16384:
   5246 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5247 				break;
   5248 			default:
   5249 				panic("wm_init: MCLBYTES %d unsupported",
   5250 				    MCLBYTES);
   5251 				break;
   5252 			}
    5253 		} else
    5254 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5254 	}
   5255 
   5256 	/* Set the receive filter. */
   5257 	wm_set_filter(sc);
   5258 
   5259 	/* Enable ECC */
   5260 	switch (sc->sc_type) {
   5261 	case WM_T_82571:
   5262 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5263 		reg |= PBA_ECC_CORR_EN;
   5264 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5265 		break;
   5266 	case WM_T_PCH_LPT:
   5267 	case WM_T_PCH_SPT:
   5268 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5269 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5270 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5271 
   5272 		sc->sc_ctrl |= CTRL_MEHE;
   5273 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5274 		break;
   5275 	default:
   5276 		break;
   5277 	}
   5278 
   5279 	/* On 575 and later set RDT only if RX enabled */
   5280 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5281 		int qidx;
   5282 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5283 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5284 			for (i = 0; i < WM_NRXDESC; i++) {
   5285 				mutex_enter(rxq->rxq_lock);
   5286 				wm_init_rxdesc(rxq, i);
    5287 				mutex_exit(rxq->rxq_lock);
    5288 			}
   5290 		}
   5291 	}
   5292 
   5293 	wm_turnon(sc);
   5294 
   5295 	/* Start the one second link check clock. */
   5296 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5297 
   5298 	/* ...all done! */
   5299 	ifp->if_flags |= IFF_RUNNING;
   5300 	ifp->if_flags &= ~IFF_OACTIVE;
   5301 
   5302  out:
   5303 	sc->sc_if_flags = ifp->if_flags;
   5304 	if (error)
   5305 		log(LOG_ERR, "%s: interface not running\n",
   5306 		    device_xname(sc->sc_dev));
   5307 	return error;
   5308 }
   5309 
   5310 /*
   5311  * wm_stop:		[ifnet interface function]
   5312  *
   5313  *	Stop transmission on the interface.
   5314  */
   5315 static void
   5316 wm_stop(struct ifnet *ifp, int disable)
   5317 {
   5318 	struct wm_softc *sc = ifp->if_softc;
   5319 
   5320 	WM_CORE_LOCK(sc);
   5321 	wm_stop_locked(ifp, disable);
   5322 	WM_CORE_UNLOCK(sc);
   5323 }
   5324 
   5325 static void
   5326 wm_stop_locked(struct ifnet *ifp, int disable)
   5327 {
   5328 	struct wm_softc *sc = ifp->if_softc;
   5329 	struct wm_txsoft *txs;
   5330 	int i, qidx;
   5331 
   5332 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5333 		device_xname(sc->sc_dev), __func__));
   5334 	KASSERT(WM_CORE_LOCKED(sc));
   5335 
   5336 	wm_turnoff(sc);
   5337 
   5338 	/* Stop the one second clock. */
   5339 	callout_stop(&sc->sc_tick_ch);
   5340 
   5341 	/* Stop the 82547 Tx FIFO stall check timer. */
   5342 	if (sc->sc_type == WM_T_82547)
   5343 		callout_stop(&sc->sc_txfifo_ch);
   5344 
   5345 	if (sc->sc_flags & WM_F_HAS_MII) {
   5346 		/* Down the MII. */
   5347 		mii_down(&sc->sc_mii);
   5348 	} else {
   5349 #if 0
    5350 		/* Should we clear the PHY's status properly? */
   5351 		wm_reset(sc);
   5352 #endif
   5353 	}
   5354 
   5355 	/* Stop the transmit and receive processes. */
   5356 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5357 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5358 	sc->sc_rctl &= ~RCTL_EN;
   5359 
   5360 	/*
   5361 	 * Clear the interrupt mask to ensure the device cannot assert its
   5362 	 * interrupt line.
   5363 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5364 	 * service any currently pending or shared interrupt.
   5365 	 */
   5366 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5367 	sc->sc_icr = 0;
   5368 	if (sc->sc_nintrs > 1) {
   5369 		if (sc->sc_type != WM_T_82574) {
   5370 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5371 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5372 		} else
   5373 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5374 	}
   5375 
   5376 	/* Release any queued transmit buffers. */
   5377 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5378 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5379 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5380 		mutex_enter(txq->txq_lock);
   5381 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5382 			txs = &txq->txq_soft[i];
   5383 			if (txs->txs_mbuf != NULL) {
    5384 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5385 				m_freem(txs->txs_mbuf);
   5386 				txs->txs_mbuf = NULL;
   5387 			}
   5388 		}
   5389 		mutex_exit(txq->txq_lock);
   5390 	}
   5391 
   5392 	/* Mark the interface as down and cancel the watchdog timer. */
   5393 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5394 	ifp->if_timer = 0;
   5395 
   5396 	if (disable) {
   5397 		for (i = 0; i < sc->sc_nqueues; i++) {
   5398 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5399 			mutex_enter(rxq->rxq_lock);
   5400 			wm_rxdrain(rxq);
   5401 			mutex_exit(rxq->rxq_lock);
   5402 		}
   5403 	}
   5404 
   5405 #if 0 /* notyet */
   5406 	if (sc->sc_type >= WM_T_82544)
   5407 		CSR_WRITE(sc, WMREG_WUC, 0);
   5408 #endif
   5409 }
   5410 
   5411 static void
   5412 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5413 {
   5414 	struct mbuf *m;
   5415 	int i;
   5416 
   5417 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5418 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5419 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5420 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5421 		    m->m_data, m->m_len, m->m_flags);
   5422 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5423 	    i, i == 1 ? "" : "s");
   5424 }
   5425 
   5426 /*
   5427  * wm_82547_txfifo_stall:
   5428  *
   5429  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5430  *	reset the FIFO pointers, and restart packet transmission.
   5431  */
   5432 static void
   5433 wm_82547_txfifo_stall(void *arg)
   5434 {
   5435 	struct wm_softc *sc = arg;
   5436 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5437 
   5438 	mutex_enter(txq->txq_lock);
   5439 
   5440 	if (txq->txq_stopping)
   5441 		goto out;
   5442 
   5443 	if (txq->txq_fifo_stall) {
   5444 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5445 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5446 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5447 			/*
   5448 			 * Packets have drained.  Stop transmitter, reset
   5449 			 * FIFO pointers, restart transmitter, and kick
   5450 			 * the packet queue.
   5451 			 */
   5452 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5453 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5454 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5455 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5456 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5457 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5458 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5459 			CSR_WRITE_FLUSH(sc);
   5460 
   5461 			txq->txq_fifo_head = 0;
   5462 			txq->txq_fifo_stall = 0;
   5463 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5464 		} else {
   5465 			/*
   5466 			 * Still waiting for packets to drain; try again in
   5467 			 * another tick.
   5468 			 */
   5469 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5470 		}
   5471 	}
   5472 
   5473 out:
   5474 	mutex_exit(txq->txq_lock);
   5475 }
   5476 
   5477 /*
   5478  * wm_82547_txfifo_bugchk:
   5479  *
   5480  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5481  *	prevent enqueueing a packet that would wrap around the end
    5482  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5483  *
   5484  *	We do this by checking the amount of space before the end
   5485  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5486  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5487  *	the internal FIFO pointers to the beginning, and restart
   5488  *	transmission on the interface.
   5489  */
   5490 #define	WM_FIFO_HDR		0x10
   5491 #define	WM_82547_PAD_LEN	0x3e0
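/*
 * Illustrative example of the check below: a full-sized 1514-byte
 * frame occupies roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536
 * FIFO bytes.  With WM_82547_PAD_LEN = 0x3e0 (992), such a frame is
 * held back whenever 1536 - 992 = 544 bytes or less remain before
 * the end of the FIFO.
 */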
   5492 static int
   5493 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5494 {
   5495 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5496 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5497 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5498 
   5499 	/* Just return if already stalled. */
   5500 	if (txq->txq_fifo_stall)
   5501 		return 1;
   5502 
   5503 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5504 		/* Stall only occurs in half-duplex mode. */
   5505 		goto send_packet;
   5506 	}
   5507 
   5508 	if (len >= WM_82547_PAD_LEN + space) {
   5509 		txq->txq_fifo_stall = 1;
   5510 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5511 		return 1;
   5512 	}
   5513 
   5514  send_packet:
   5515 	txq->txq_fifo_head += len;
   5516 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5517 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5518 
   5519 	return 0;
   5520 }
   5521 
   5522 static int
   5523 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5524 {
   5525 	int error;
   5526 
   5527 	/*
   5528 	 * Allocate the control data structures, and create and load the
   5529 	 * DMA map for it.
   5530 	 *
   5531 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5532 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5533 	 * both sets within the same 4G segment.
   5534 	 */
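	/*
	 * The 4G constraint is enforced below by passing a boundary of
	 * 0x100000000 to bus_dmamem_alloc(), which guarantees that the
	 * allocated segment does not cross a 4GB address boundary.
	 */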
   5535 	if (sc->sc_type < WM_T_82544)
   5536 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5537 	else
   5538 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5539 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5540 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5541 	else
   5542 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5543 
   5544 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5545 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5546 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5547 		aprint_error_dev(sc->sc_dev,
   5548 		    "unable to allocate TX control data, error = %d\n",
   5549 		    error);
   5550 		goto fail_0;
   5551 	}
   5552 
   5553 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5554 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5555 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5556 		aprint_error_dev(sc->sc_dev,
   5557 		    "unable to map TX control data, error = %d\n", error);
   5558 		goto fail_1;
   5559 	}
   5560 
   5561 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5562 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5563 		aprint_error_dev(sc->sc_dev,
   5564 		    "unable to create TX control data DMA map, error = %d\n",
   5565 		    error);
   5566 		goto fail_2;
   5567 	}
   5568 
   5569 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5570 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5571 		aprint_error_dev(sc->sc_dev,
   5572 		    "unable to load TX control data DMA map, error = %d\n",
   5573 		    error);
   5574 		goto fail_3;
   5575 	}
   5576 
   5577 	return 0;
   5578 
   5579  fail_3:
   5580 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5581  fail_2:
   5582 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5583 	    WM_TXDESCS_SIZE(txq));
   5584  fail_1:
   5585 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5586  fail_0:
   5587 	return error;
   5588 }
   5589 
   5590 static void
   5591 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5592 {
   5593 
   5594 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5595 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5596 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5597 	    WM_TXDESCS_SIZE(txq));
   5598 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5599 }
   5600 
   5601 static int
   5602 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5603 {
   5604 	int error;
   5605 	size_t rxq_descs_size;
   5606 
   5607 	/*
   5608 	 * Allocate the control data structures, and create and load the
   5609 	 * DMA map for it.
   5610 	 *
   5611 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5612 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5613 	 * both sets within the same 4G segment.
   5614 	 */
   5615 	rxq->rxq_ndesc = WM_NRXDESC;
   5616 	if (sc->sc_type == WM_T_82574)
   5617 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5618 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5619 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5620 	else
   5621 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5622 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5623 
   5624 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5625 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5626 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5627 		aprint_error_dev(sc->sc_dev,
   5628 		    "unable to allocate RX control data, error = %d\n",
   5629 		    error);
   5630 		goto fail_0;
   5631 	}
   5632 
   5633 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5634 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5635 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5636 		aprint_error_dev(sc->sc_dev,
   5637 		    "unable to map RX control data, error = %d\n", error);
   5638 		goto fail_1;
   5639 	}
   5640 
   5641 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5642 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5643 		aprint_error_dev(sc->sc_dev,
   5644 		    "unable to create RX control data DMA map, error = %d\n",
   5645 		    error);
   5646 		goto fail_2;
   5647 	}
   5648 
   5649 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5650 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5651 		aprint_error_dev(sc->sc_dev,
   5652 		    "unable to load RX control data DMA map, error = %d\n",
   5653 		    error);
   5654 		goto fail_3;
   5655 	}
   5656 
   5657 	return 0;
   5658 
   5659  fail_3:
   5660 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5661  fail_2:
   5662 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5663 	    rxq_descs_size);
   5664  fail_1:
   5665 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5666  fail_0:
   5667 	return error;
   5668 }
   5669 
   5670 static void
   5671 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5672 {
   5673 
   5674 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5675 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5676 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5677 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5678 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5679 }
   5680 
   5681 
   5682 static int
   5683 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5684 {
   5685 	int i, error;
   5686 
   5687 	/* Create the transmit buffer DMA maps. */
   5688 	WM_TXQUEUELEN(txq) =
   5689 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5690 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5691 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5692 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5693 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5694 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5695 			aprint_error_dev(sc->sc_dev,
   5696 			    "unable to create Tx DMA map %d, error = %d\n",
   5697 			    i, error);
   5698 			goto fail;
   5699 		}
   5700 	}
   5701 
   5702 	return 0;
   5703 
   5704  fail:
   5705 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5706 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5707 			bus_dmamap_destroy(sc->sc_dmat,
   5708 			    txq->txq_soft[i].txs_dmamap);
   5709 	}
   5710 	return error;
   5711 }
   5712 
   5713 static void
   5714 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5715 {
   5716 	int i;
   5717 
   5718 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5719 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5720 			bus_dmamap_destroy(sc->sc_dmat,
   5721 			    txq->txq_soft[i].txs_dmamap);
   5722 	}
   5723 }
   5724 
   5725 static int
   5726 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5727 {
   5728 	int i, error;
   5729 
   5730 	/* Create the receive buffer DMA maps. */
   5731 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5732 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5733 			    MCLBYTES, 0, 0,
   5734 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5735 			aprint_error_dev(sc->sc_dev,
   5736 			    "unable to create Rx DMA map %d error = %d\n",
   5737 			    i, error);
   5738 			goto fail;
   5739 		}
   5740 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5741 	}
   5742 
   5743 	return 0;
   5744 
   5745  fail:
   5746 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5747 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5748 			bus_dmamap_destroy(sc->sc_dmat,
   5749 			    rxq->rxq_soft[i].rxs_dmamap);
   5750 	}
   5751 	return error;
   5752 }
   5753 
   5754 static void
   5755 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5756 {
   5757 	int i;
   5758 
   5759 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5760 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5761 			bus_dmamap_destroy(sc->sc_dmat,
   5762 			    rxq->rxq_soft[i].rxs_dmamap);
   5763 	}
   5764 }
   5765 
   5766 /*
    5767  * wm_alloc_txrx_queues:
    5768  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   5769  */
   5770 static int
   5771 wm_alloc_txrx_queues(struct wm_softc *sc)
   5772 {
   5773 	int i, error, tx_done, rx_done;
   5774 
   5775 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5776 	    KM_SLEEP);
   5777 	if (sc->sc_queue == NULL) {
    5778 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5779 		error = ENOMEM;
   5780 		goto fail_0;
   5781 	}
   5782 
   5783 	/*
   5784 	 * For transmission
   5785 	 */
   5786 	error = 0;
   5787 	tx_done = 0;
   5788 	for (i = 0; i < sc->sc_nqueues; i++) {
   5789 #ifdef WM_EVENT_COUNTERS
   5790 		int j;
   5791 		const char *xname;
   5792 #endif
   5793 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5794 		txq->txq_sc = sc;
   5795 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5796 
   5797 		error = wm_alloc_tx_descs(sc, txq);
   5798 		if (error)
   5799 			break;
   5800 		error = wm_alloc_tx_buffer(sc, txq);
   5801 		if (error) {
   5802 			wm_free_tx_descs(sc, txq);
   5803 			break;
   5804 		}
   5805 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5806 		if (txq->txq_interq == NULL) {
   5807 			wm_free_tx_descs(sc, txq);
   5808 			wm_free_tx_buffer(sc, txq);
   5809 			error = ENOMEM;
   5810 			break;
   5811 		}
   5812 
   5813 #ifdef WM_EVENT_COUNTERS
   5814 		xname = device_xname(sc->sc_dev);
   5815 
   5816 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5817 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5818 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5819 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5820 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5821 
   5822 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5823 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5824 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5825 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5826 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5827 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5828 
   5829 		for (j = 0; j < WM_NTXSEGS; j++) {
    5830 			snprintf(txq->txq_txseg_evcnt_names[j],
    5831 			    sizeof(txq->txq_txseg_evcnt_names[j]),
    5832 			    "txq%02dtxseg%d", i, j);
    5833 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
    5834 			    EVCNT_TYPE_MISC, NULL, xname,
    5835 			    txq->txq_txseg_evcnt_names[j]);
   5834 		}
   5835 
   5836 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5837 
   5838 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5839 #endif /* WM_EVENT_COUNTERS */
   5840 
   5841 		tx_done++;
   5842 	}
   5843 	if (error)
   5844 		goto fail_1;
   5845 
   5846 	/*
    5847 	 * For reception
   5848 	 */
   5849 	error = 0;
   5850 	rx_done = 0;
   5851 	for (i = 0; i < sc->sc_nqueues; i++) {
   5852 #ifdef WM_EVENT_COUNTERS
   5853 		const char *xname;
   5854 #endif
   5855 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5856 		rxq->rxq_sc = sc;
   5857 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5858 
   5859 		error = wm_alloc_rx_descs(sc, rxq);
   5860 		if (error)
   5861 			break;
   5862 
   5863 		error = wm_alloc_rx_buffer(sc, rxq);
   5864 		if (error) {
   5865 			wm_free_rx_descs(sc, rxq);
   5866 			break;
   5867 		}
   5868 
   5869 #ifdef WM_EVENT_COUNTERS
   5870 		xname = device_xname(sc->sc_dev);
   5871 
   5872 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5873 
   5874 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5875 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5876 #endif /* WM_EVENT_COUNTERS */
   5877 
   5878 		rx_done++;
   5879 	}
   5880 	if (error)
   5881 		goto fail_2;
   5882 
   5883 	return 0;
   5884 
   5885  fail_2:
   5886 	for (i = 0; i < rx_done; i++) {
   5887 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5888 		wm_free_rx_buffer(sc, rxq);
   5889 		wm_free_rx_descs(sc, rxq);
   5890 		if (rxq->rxq_lock)
   5891 			mutex_obj_free(rxq->rxq_lock);
   5892 	}
   5893  fail_1:
   5894 	for (i = 0; i < tx_done; i++) {
   5895 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5896 		pcq_destroy(txq->txq_interq);
   5897 		wm_free_tx_buffer(sc, txq);
   5898 		wm_free_tx_descs(sc, txq);
   5899 		if (txq->txq_lock)
   5900 			mutex_obj_free(txq->txq_lock);
   5901 	}
   5902 
   5903 	kmem_free(sc->sc_queue,
   5904 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5905  fail_0:
   5906 	return error;
   5907 }
   5908 
   5909 /*
    5910  * wm_free_txrx_queues:
    5911  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   5912  */
   5913 static void
   5914 wm_free_txrx_queues(struct wm_softc *sc)
   5915 {
   5916 	int i;
   5917 
   5918 	for (i = 0; i < sc->sc_nqueues; i++) {
   5919 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5920 		wm_free_rx_buffer(sc, rxq);
   5921 		wm_free_rx_descs(sc, rxq);
   5922 		if (rxq->rxq_lock)
   5923 			mutex_obj_free(rxq->rxq_lock);
   5924 	}
   5925 
   5926 	for (i = 0; i < sc->sc_nqueues; i++) {
   5927 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5928 		struct mbuf *m;
   5929 
   5930 		/* drain txq_interq */
   5931 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   5932 			m_freem(m);
   5933 		pcq_destroy(txq->txq_interq);
   5934 
   5935 		wm_free_tx_buffer(sc, txq);
   5936 		wm_free_tx_descs(sc, txq);
   5937 		if (txq->txq_lock)
   5938 			mutex_obj_free(txq->txq_lock);
   5939 	}
   5940 
   5941 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5942 }
   5943 
   5944 static void
   5945 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5946 {
   5947 
   5948 	KASSERT(mutex_owned(txq->txq_lock));
   5949 
   5950 	/* Initialize the transmit descriptor ring. */
   5951 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5952 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5953 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5954 	txq->txq_free = WM_NTXDESC(txq);
   5955 	txq->txq_next = 0;
   5956 }
   5957 
   5958 static void
   5959 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5960     struct wm_txqueue *txq)
   5961 {
   5962 
   5963 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5964 		device_xname(sc->sc_dev), __func__));
   5965 	KASSERT(mutex_owned(txq->txq_lock));
   5966 
   5967 	if (sc->sc_type < WM_T_82543) {
   5968 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5969 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5970 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5971 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5972 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5973 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5974 	} else {
   5975 		int qid = wmq->wmq_id;
   5976 
   5977 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5978 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5979 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5980 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5981 
   5982 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5983 			/*
   5984 			 * Don't write TDT before TCTL.EN is set.
    5985 			 * See the documentation.
   5986 			 */
   5987 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5988 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5989 			    | TXDCTL_WTHRESH(0));
   5990 		else {
   5991 			/* ITR / 4 */
   5992 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5993 			if (sc->sc_type >= WM_T_82540) {
    5994 				/* Should be the same value as TIDV. */
   5995 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5996 			}
   5997 
   5998 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5999 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6000 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6001 		}
   6002 	}
   6003 }
   6004 
   6005 static void
   6006 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6007 {
   6008 	int i;
   6009 
   6010 	KASSERT(mutex_owned(txq->txq_lock));
   6011 
   6012 	/* Initialize the transmit job descriptors. */
   6013 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6014 		txq->txq_soft[i].txs_mbuf = NULL;
   6015 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6016 	txq->txq_snext = 0;
   6017 	txq->txq_sdirty = 0;
   6018 }
   6019 
   6020 static void
   6021 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6022     struct wm_txqueue *txq)
   6023 {
   6024 
   6025 	KASSERT(mutex_owned(txq->txq_lock));
   6026 
   6027 	/*
   6028 	 * Set up some register offsets that are different between
   6029 	 * the i82542 and the i82543 and later chips.
   6030 	 */
   6031 	if (sc->sc_type < WM_T_82543)
   6032 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6033 	else
   6034 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6035 
   6036 	wm_init_tx_descs(sc, txq);
   6037 	wm_init_tx_regs(sc, wmq, txq);
   6038 	wm_init_tx_buffer(sc, txq);
   6039 }
   6040 
   6041 static void
   6042 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6043     struct wm_rxqueue *rxq)
   6044 {
   6045 
   6046 	KASSERT(mutex_owned(rxq->rxq_lock));
   6047 
   6048 	/*
   6049 	 * Initialize the receive descriptor and receive job
   6050 	 * descriptor rings.
   6051 	 */
   6052 	if (sc->sc_type < WM_T_82543) {
   6053 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6054 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6055 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6056 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6057 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6058 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6059 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6060 
   6061 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6062 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6063 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6064 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6065 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6066 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6067 	} else {
   6068 		int qid = wmq->wmq_id;
   6069 
   6070 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6071 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
    6072 		CSR_WRITE(sc, WMREG_RDLEN(qid),
    6073 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6073 
   6074 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6075 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    6076 				panic("%s: MCLBYTES %d unsupported for 82575 "
    6077 				    "or higher\n", __func__, MCLBYTES);
    6078 
    6079 			/* Currently only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
    6080 			CSR_WRITE(sc, WMREG_SRRCTL(qid),
    6081 			    SRRCTL_DESCTYPE_ADV_ONEBUF
    6082 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6081 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6082 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6083 			    | RXDCTL_WTHRESH(1));
   6084 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6085 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6086 		} else {
   6087 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6088 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6089 			/* ITR / 4 */
   6090 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    6091 			/* MUST be the same value as RDTR. */
   6092 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   6093 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6094 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6095 		}
   6096 	}
   6097 }
   6098 
   6099 static int
   6100 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6101 {
   6102 	struct wm_rxsoft *rxs;
   6103 	int error, i;
   6104 
   6105 	KASSERT(mutex_owned(rxq->rxq_lock));
   6106 
   6107 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6108 		rxs = &rxq->rxq_soft[i];
   6109 		if (rxs->rxs_mbuf == NULL) {
   6110 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6111 				log(LOG_ERR, "%s: unable to allocate or map "
   6112 				    "rx buffer %d, error = %d\n",
   6113 				    device_xname(sc->sc_dev), i, error);
   6114 				/*
   6115 				 * XXX Should attempt to run with fewer receive
   6116 				 * XXX buffers instead of just failing.
   6117 				 */
   6118 				wm_rxdrain(rxq);
   6119 				return ENOMEM;
   6120 			}
   6121 		} else {
   6122 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6123 				wm_init_rxdesc(rxq, i);
   6124 			/*
    6125 			 * For the 82575 and newer devices, the RX descriptors
    6126 			 * must be initialized after RCTL.EN is set in
    6127 			 * wm_set_filter().
   6128 			 */
   6129 		}
   6130 	}
   6131 	rxq->rxq_ptr = 0;
   6132 	rxq->rxq_discard = 0;
   6133 	WM_RXCHAIN_RESET(rxq);
   6134 
   6135 	return 0;
   6136 }
   6137 
   6138 static int
   6139 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6140     struct wm_rxqueue *rxq)
   6141 {
   6142 
   6143 	KASSERT(mutex_owned(rxq->rxq_lock));
   6144 
   6145 	/*
   6146 	 * Set up some register offsets that are different between
   6147 	 * the i82542 and the i82543 and later chips.
   6148 	 */
   6149 	if (sc->sc_type < WM_T_82543)
   6150 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6151 	else
   6152 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6153 
   6154 	wm_init_rx_regs(sc, wmq, rxq);
   6155 	return wm_init_rx_buffer(sc, rxq);
   6156 }
   6157 
   6158 /*
    6159  * wm_init_txrx_queues:
    6160  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   6161  */
   6162 static int
   6163 wm_init_txrx_queues(struct wm_softc *sc)
   6164 {
   6165 	int i, error = 0;
   6166 
   6167 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6168 		device_xname(sc->sc_dev), __func__));
   6169 
   6170 	for (i = 0; i < sc->sc_nqueues; i++) {
   6171 		struct wm_queue *wmq = &sc->sc_queue[i];
   6172 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6173 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6174 
   6175 		mutex_enter(txq->txq_lock);
   6176 		wm_init_tx_queue(sc, wmq, txq);
   6177 		mutex_exit(txq->txq_lock);
   6178 
   6179 		mutex_enter(rxq->rxq_lock);
   6180 		error = wm_init_rx_queue(sc, wmq, rxq);
   6181 		mutex_exit(rxq->rxq_lock);
   6182 		if (error)
   6183 			break;
   6184 	}
   6185 
   6186 	return error;
   6187 }
   6188 
   6189 /*
   6190  * wm_tx_offload:
   6191  *
   6192  *	Set up TCP/IP checksumming parameters for the
   6193  *	specified packet.
   6194  */
   6195 static int
   6196 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6197     uint8_t *fieldsp)
   6198 {
   6199 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6200 	struct mbuf *m0 = txs->txs_mbuf;
   6201 	struct livengood_tcpip_ctxdesc *t;
   6202 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6203 	uint32_t ipcse;
   6204 	struct ether_header *eh;
   6205 	int offset, iphl;
   6206 	uint8_t fields;
   6207 
   6208 	/*
   6209 	 * XXX It would be nice if the mbuf pkthdr had offset
   6210 	 * fields for the protocol headers.
   6211 	 */
   6212 
   6213 	eh = mtod(m0, struct ether_header *);
   6214 	switch (htons(eh->ether_type)) {
   6215 	case ETHERTYPE_IP:
   6216 	case ETHERTYPE_IPV6:
   6217 		offset = ETHER_HDR_LEN;
   6218 		break;
   6219 
   6220 	case ETHERTYPE_VLAN:
   6221 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6222 		break;
   6223 
   6224 	default:
   6225 		/*
   6226 		 * Don't support this protocol or encapsulation.
   6227 		 */
   6228 		*fieldsp = 0;
   6229 		*cmdp = 0;
   6230 		return 0;
   6231 	}
   6232 
   6233 	if ((m0->m_pkthdr.csum_flags &
   6234 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6235 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6236 	} else {
   6237 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6238 	}
   6239 	ipcse = offset + iphl - 1;
   6240 
   6241 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6242 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6243 	seg = 0;
   6244 	fields = 0;
   6245 
   6246 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6247 		int hlen = offset + iphl;
   6248 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6249 
   6250 		if (__predict_false(m0->m_len <
   6251 				    (hlen + sizeof(struct tcphdr)))) {
   6252 			/*
   6253 			 * TCP/IP headers are not in the first mbuf; we need
   6254 			 * to do this the slow and painful way.  Let's just
   6255 			 * hope this doesn't happen very often.
   6256 			 */
   6257 			struct tcphdr th;
   6258 
   6259 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6260 
   6261 			m_copydata(m0, hlen, sizeof(th), &th);
   6262 			if (v4) {
   6263 				struct ip ip;
   6264 
   6265 				m_copydata(m0, offset, sizeof(ip), &ip);
   6266 				ip.ip_len = 0;
   6267 				m_copyback(m0,
   6268 				    offset + offsetof(struct ip, ip_len),
   6269 				    sizeof(ip.ip_len), &ip.ip_len);
   6270 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6271 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6272 			} else {
   6273 				struct ip6_hdr ip6;
   6274 
   6275 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6276 				ip6.ip6_plen = 0;
   6277 				m_copyback(m0,
   6278 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6279 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6280 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6281 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6282 			}
   6283 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6284 			    sizeof(th.th_sum), &th.th_sum);
   6285 
   6286 			hlen += th.th_off << 2;
   6287 		} else {
   6288 			/*
   6289 			 * TCP/IP headers are in the first mbuf; we can do
   6290 			 * this the easy way.
   6291 			 */
   6292 			struct tcphdr *th;
   6293 
   6294 			if (v4) {
   6295 				struct ip *ip =
   6296 				    (void *)(mtod(m0, char *) + offset);
   6297 				th = (void *)(mtod(m0, char *) + hlen);
   6298 
   6299 				ip->ip_len = 0;
   6300 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6301 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6302 			} else {
   6303 				struct ip6_hdr *ip6 =
   6304 				    (void *)(mtod(m0, char *) + offset);
   6305 				th = (void *)(mtod(m0, char *) + hlen);
   6306 
   6307 				ip6->ip6_plen = 0;
   6308 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6309 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6310 			}
   6311 			hlen += th->th_off << 2;
   6312 		}
   6313 
   6314 		if (v4) {
   6315 			WM_Q_EVCNT_INCR(txq, txtso);
   6316 			cmdlen |= WTX_TCPIP_CMD_IP;
   6317 		} else {
   6318 			WM_Q_EVCNT_INCR(txq, txtso6);
   6319 			ipcse = 0;
   6320 		}
   6321 		cmd |= WTX_TCPIP_CMD_TSE;
   6322 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6323 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6324 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6325 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6326 	}
   6327 
   6328 	/*
   6329 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6330 	 * offload feature, if we load the context descriptor, we
   6331 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6332 	 */
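	/*
	 * For illustration (derived from the code below, not quoted from
	 * the datasheet): for an untagged IPv4/TCP frame, offset is
	 * ETHER_HDR_LEN (14), so the context descriptor carries
	 *	IPCSS = 14				start of IP header
	 *	IPCSO = 14 + offsetof(struct ip, ip_sum)
	 *	IPCSE = 14 + iphl - 1			end of IP header
	 *	TUCSS = 14 + iphl			start of TCP header
	 *	TUCSO = TUCSS + offsetof(struct tcphdr, th_sum)
	 *	TUCSE = 0				to end of packet
	 */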
   6333 
   6334 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6335 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6336 	    WTX_TCPIP_IPCSE(ipcse);
   6337 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6338 		WM_Q_EVCNT_INCR(txq, txipsum);
   6339 		fields |= WTX_IXSM;
   6340 	}
   6341 
   6342 	offset += iphl;
   6343 
   6344 	if (m0->m_pkthdr.csum_flags &
   6345 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6346 		WM_Q_EVCNT_INCR(txq, txtusum);
   6347 		fields |= WTX_TXSM;
   6348 		tucs = WTX_TCPIP_TUCSS(offset) |
   6349 		    WTX_TCPIP_TUCSO(offset +
   6350 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6351 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6352 	} else if ((m0->m_pkthdr.csum_flags &
   6353 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6354 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6355 		fields |= WTX_TXSM;
   6356 		tucs = WTX_TCPIP_TUCSS(offset) |
   6357 		    WTX_TCPIP_TUCSO(offset +
   6358 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6359 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6360 	} else {
   6361 		/* Just initialize it to a valid TCP context. */
   6362 		tucs = WTX_TCPIP_TUCSS(offset) |
   6363 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6364 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6365 	}
   6366 
   6367 	/* Fill in the context descriptor. */
   6368 	t = (struct livengood_tcpip_ctxdesc *)
   6369 	    &txq->txq_descs[txq->txq_next];
   6370 	t->tcpip_ipcs = htole32(ipcs);
   6371 	t->tcpip_tucs = htole32(tucs);
   6372 	t->tcpip_cmdlen = htole32(cmdlen);
   6373 	t->tcpip_seg = htole32(seg);
   6374 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6375 
   6376 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6377 	txs->txs_ndesc++;
   6378 
   6379 	*cmdp = cmd;
   6380 	*fieldsp = fields;
   6381 
   6382 	return 0;
   6383 }
   6384 
   6385 static inline int
   6386 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6387 {
   6388 	struct wm_softc *sc = ifp->if_softc;
   6389 	u_int cpuid = cpu_index(curcpu());
   6390 
    6391 	/*
    6392 	 * Currently a simple distribution strategy: map the sending
    6393 	 * CPU to a Tx queue round-robin.
    6394 	 * TODO: distribute by flowid (RSS hash value).
    6395 	 */
   6396 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6397 }
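
/*
 * A minimal sketch of the flowid-based distribution mentioned in the
 * TODO above, assuming the stack supplied an RSS hash with the packet
 * (the mbuf members named here are hypothetical, not current API):
 *
 *	if (m->m_pkthdr.rsshash_valid)
 *		return m->m_pkthdr.rsshash % sc->sc_nqueues;
 *	return (cpu_index(curcpu()) + sc->sc_affinity_offset)
 *	    % sc->sc_nqueues;
 */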
   6398 
   6399 /*
   6400  * wm_start:		[ifnet interface function]
   6401  *
   6402  *	Start packet transmission on the interface.
   6403  */
   6404 static void
   6405 wm_start(struct ifnet *ifp)
   6406 {
   6407 	struct wm_softc *sc = ifp->if_softc;
   6408 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6409 
   6410 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6411 
   6412 	/*
   6413 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6414 	 */
   6415 
   6416 	mutex_enter(txq->txq_lock);
   6417 	if (!txq->txq_stopping)
   6418 		wm_start_locked(ifp);
   6419 	mutex_exit(txq->txq_lock);
   6420 }
   6421 
   6422 static void
   6423 wm_start_locked(struct ifnet *ifp)
   6424 {
   6425 	struct wm_softc *sc = ifp->if_softc;
   6426 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6427 
   6428 	wm_send_common_locked(ifp, txq, false);
   6429 }
   6430 
   6431 static int
   6432 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6433 {
   6434 	int qid;
   6435 	struct wm_softc *sc = ifp->if_softc;
   6436 	struct wm_txqueue *txq;
   6437 
   6438 	qid = wm_select_txqueue(ifp, m);
   6439 	txq = &sc->sc_queue[qid].wmq_txq;
   6440 
   6441 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6442 		m_freem(m);
   6443 		WM_Q_EVCNT_INCR(txq, txdrop);
   6444 		return ENOBUFS;
   6445 	}
   6446 
   6447 	/*
   6448 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6449 	 */
   6450 	ifp->if_obytes += m->m_pkthdr.len;
   6451 	if (m->m_flags & M_MCAST)
   6452 		ifp->if_omcasts++;
   6453 
   6454 	if (mutex_tryenter(txq->txq_lock)) {
   6455 		if (!txq->txq_stopping)
   6456 			wm_transmit_locked(ifp, txq);
   6457 		mutex_exit(txq->txq_lock);
   6458 	}
   6459 
   6460 	return 0;
   6461 }
   6462 
   6463 static void
   6464 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6465 {
   6466 
   6467 	wm_send_common_locked(ifp, txq, true);
   6468 }
   6469 
   6470 static void
   6471 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6472     bool is_transmit)
   6473 {
   6474 	struct wm_softc *sc = ifp->if_softc;
   6475 	struct mbuf *m0;
   6476 	struct m_tag *mtag;
   6477 	struct wm_txsoft *txs;
   6478 	bus_dmamap_t dmamap;
   6479 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6480 	bus_addr_t curaddr;
   6481 	bus_size_t seglen, curlen;
   6482 	uint32_t cksumcmd;
   6483 	uint8_t cksumfields;
   6484 
   6485 	KASSERT(mutex_owned(txq->txq_lock));
   6486 
   6487 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6488 		return;
   6489 
   6490 	/* Remember the previous number of free descriptors. */
   6491 	ofree = txq->txq_free;
   6492 
   6493 	/*
   6494 	 * Loop through the send queue, setting up transmit descriptors
   6495 	 * until we drain the queue, or use up all available transmit
   6496 	 * descriptors.
   6497 	 */
   6498 	for (;;) {
   6499 		m0 = NULL;
   6500 
   6501 		/* Get a work queue entry. */
   6502 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6503 			wm_txeof(sc, txq);
   6504 			if (txq->txq_sfree == 0) {
   6505 				DPRINTF(WM_DEBUG_TX,
   6506 				    ("%s: TX: no free job descriptors\n",
   6507 					device_xname(sc->sc_dev)));
   6508 				WM_Q_EVCNT_INCR(txq, txsstall);
   6509 				break;
   6510 			}
   6511 		}
   6512 
   6513 		/* Grab a packet off the queue. */
   6514 		if (is_transmit)
   6515 			m0 = pcq_get(txq->txq_interq);
   6516 		else
   6517 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6518 		if (m0 == NULL)
   6519 			break;
   6520 
   6521 		DPRINTF(WM_DEBUG_TX,
   6522 		    ("%s: TX: have packet to transmit: %p\n",
   6523 		    device_xname(sc->sc_dev), m0));
   6524 
   6525 		txs = &txq->txq_soft[txq->txq_snext];
   6526 		dmamap = txs->txs_dmamap;
   6527 
   6528 		use_tso = (m0->m_pkthdr.csum_flags &
   6529 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6530 
    6531 		/*
    6532 		 * So says the Linux driver:
    6533 		 * The controller does a simple calculation to make sure
    6534 		 * there is enough room in the FIFO before initiating the
    6535 		 * DMA for each buffer.  That calculation assumes
    6536 		 *	ceil(buffer len / MSS) <= 4
    6537 		 * so, to make sure we don't overrun the FIFO, clamp the
    6538 		 * max buffer len to 4 * MSS whenever the MSS drops.
    6539 		 */
   6540 		dmamap->dm_maxsegsz =
   6541 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6542 		    ? m0->m_pkthdr.segsz << 2
   6543 		    : WTX_MAX_LEN;
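		/*
		 * Worked example (a sketch, not from the datasheet): with
		 * an MSS of 1460 (IPv4 at the standard Ethernet MTU), each
		 * DMA segment is capped at 4 * 1460 = 5840 bytes, assuming
		 * that is below WTX_MAX_LEN.
		 */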
   6544 
   6545 		/*
   6546 		 * Load the DMA map.  If this fails, the packet either
   6547 		 * didn't fit in the allotted number of segments, or we
   6548 		 * were short on resources.  For the too-many-segments
   6549 		 * case, we simply report an error and drop the packet,
   6550 		 * since we can't sanely copy a jumbo packet to a single
   6551 		 * buffer.
   6552 		 */
   6553 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6554 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6555 		if (error) {
   6556 			if (error == EFBIG) {
   6557 				WM_Q_EVCNT_INCR(txq, txdrop);
   6558 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6559 				    "DMA segments, dropping...\n",
   6560 				    device_xname(sc->sc_dev));
   6561 				wm_dump_mbuf_chain(sc, m0);
   6562 				m_freem(m0);
   6563 				continue;
   6564 			}
   6565 			/*  Short on resources, just stop for now. */
   6566 			DPRINTF(WM_DEBUG_TX,
   6567 			    ("%s: TX: dmamap load failed: %d\n",
   6568 			    device_xname(sc->sc_dev), error));
   6569 			break;
   6570 		}
   6571 
   6572 		segs_needed = dmamap->dm_nsegs;
   6573 		if (use_tso) {
   6574 			/* For sentinel descriptor; see below. */
   6575 			segs_needed++;
   6576 		}
   6577 
   6578 		/*
   6579 		 * Ensure we have enough descriptors free to describe
   6580 		 * the packet.  Note, we always reserve one descriptor
   6581 		 * at the end of the ring due to the semantics of the
   6582 		 * TDT register, plus one more in the event we need
   6583 		 * to load offload context.
   6584 		 */
   6585 		if (segs_needed > txq->txq_free - 2) {
   6586 			/*
   6587 			 * Not enough free descriptors to transmit this
   6588 			 * packet.  We haven't committed anything yet,
   6589 			 * so just unload the DMA map, put the packet
    6590 			 * back on the queue, and punt.  Notify the upper
   6591 			 * layer that there are no more slots left.
   6592 			 */
   6593 			DPRINTF(WM_DEBUG_TX,
   6594 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6595 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6596 			    segs_needed, txq->txq_free - 1));
   6597 			ifp->if_flags |= IFF_OACTIVE;
   6598 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6599 			WM_Q_EVCNT_INCR(txq, txdstall);
   6600 			break;
   6601 		}
   6602 
   6603 		/*
   6604 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6605 		 * once we know we can transmit the packet, since we
   6606 		 * do some internal FIFO space accounting here.
   6607 		 */
   6608 		if (sc->sc_type == WM_T_82547 &&
   6609 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6610 			DPRINTF(WM_DEBUG_TX,
   6611 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6612 			    device_xname(sc->sc_dev)));
   6613 			ifp->if_flags |= IFF_OACTIVE;
   6614 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6615 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6616 			break;
   6617 		}
   6618 
   6619 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6620 
   6621 		DPRINTF(WM_DEBUG_TX,
   6622 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6623 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6624 
   6625 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6626 
   6627 		/*
   6628 		 * Store a pointer to the packet so that we can free it
   6629 		 * later.
   6630 		 *
    6631 		 * Initially, we take the number of descriptors the packet
    6632 		 * uses to be the number of DMA segments.  This may be
   6633 		 * incremented by 1 if we do checksum offload (a descriptor
   6634 		 * is used to set the checksum context).
   6635 		 */
   6636 		txs->txs_mbuf = m0;
   6637 		txs->txs_firstdesc = txq->txq_next;
   6638 		txs->txs_ndesc = segs_needed;
   6639 
   6640 		/* Set up offload parameters for this packet. */
   6641 		if (m0->m_pkthdr.csum_flags &
   6642 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6643 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6644 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6645 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6646 					  &cksumfields) != 0) {
   6647 				/* Error message already displayed. */
   6648 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6649 				continue;
   6650 			}
   6651 		} else {
   6652 			cksumcmd = 0;
   6653 			cksumfields = 0;
   6654 		}
   6655 
   6656 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6657 
   6658 		/* Sync the DMA map. */
   6659 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6660 		    BUS_DMASYNC_PREWRITE);
   6661 
    6662 		/* Initialize the transmit descriptors. */
   6663 		for (nexttx = txq->txq_next, seg = 0;
   6664 		     seg < dmamap->dm_nsegs; seg++) {
   6665 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6666 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6667 			     seglen != 0;
   6668 			     curaddr += curlen, seglen -= curlen,
   6669 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6670 				curlen = seglen;
   6671 
   6672 				/*
   6673 				 * So says the Linux driver:
   6674 				 * Work around for premature descriptor
   6675 				 * write-backs in TSO mode.  Append a
   6676 				 * 4-byte sentinel descriptor.
   6677 				 */
   6678 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6679 				    curlen > 8)
   6680 					curlen -= 4;
   6681 
   6682 				wm_set_dma_addr(
   6683 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6684 				txq->txq_descs[nexttx].wtx_cmdlen
   6685 				    = htole32(cksumcmd | curlen);
   6686 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6687 				    = 0;
   6688 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6689 				    = cksumfields;
    6690 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6691 				lasttx = nexttx;
   6692 
   6693 				DPRINTF(WM_DEBUG_TX,
   6694 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6695 				     "len %#04zx\n",
   6696 				    device_xname(sc->sc_dev), nexttx,
   6697 				    (uint64_t)curaddr, curlen));
   6698 			}
   6699 		}
   6700 
   6701 		KASSERT(lasttx != -1);
   6702 
   6703 		/*
   6704 		 * Set up the command byte on the last descriptor of
   6705 		 * the packet.  If we're in the interrupt delay window,
   6706 		 * delay the interrupt.
   6707 		 */
   6708 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6709 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6710 
   6711 		/*
   6712 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6713 		 * up the descriptor to encapsulate the packet for us.
   6714 		 *
   6715 		 * This is only valid on the last descriptor of the packet.
   6716 		 */
   6717 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6718 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6719 			    htole32(WTX_CMD_VLE);
   6720 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6721 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6722 		}
   6723 
   6724 		txs->txs_lastdesc = lasttx;
   6725 
   6726 		DPRINTF(WM_DEBUG_TX,
   6727 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6728 		    device_xname(sc->sc_dev),
   6729 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6730 
   6731 		/* Sync the descriptors we're using. */
   6732 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6733 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6734 
   6735 		/* Give the packet to the chip. */
   6736 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6737 
   6738 		DPRINTF(WM_DEBUG_TX,
   6739 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6740 
   6741 		DPRINTF(WM_DEBUG_TX,
   6742 		    ("%s: TX: finished transmitting packet, job %d\n",
   6743 		    device_xname(sc->sc_dev), txq->txq_snext));
   6744 
   6745 		/* Advance the tx pointer. */
   6746 		txq->txq_free -= txs->txs_ndesc;
   6747 		txq->txq_next = nexttx;
   6748 
   6749 		txq->txq_sfree--;
   6750 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6751 
   6752 		/* Pass the packet to any BPF listeners. */
   6753 		bpf_mtap(ifp, m0);
   6754 	}
   6755 
   6756 	if (m0 != NULL) {
   6757 		ifp->if_flags |= IFF_OACTIVE;
   6758 		WM_Q_EVCNT_INCR(txq, txdrop);
   6759 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6760 			__func__));
   6761 		m_freem(m0);
   6762 	}
   6763 
   6764 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6765 		/* No more slots; notify upper layer. */
   6766 		ifp->if_flags |= IFF_OACTIVE;
   6767 	}
   6768 
   6769 	if (txq->txq_free != ofree) {
   6770 		/* Set a watchdog timer in case the chip flakes out. */
   6771 		ifp->if_timer = 5;
   6772 	}
   6773 }
   6774 
   6775 /*
   6776  * wm_nq_tx_offload:
   6777  *
   6778  *	Set up TCP/IP checksumming parameters for the
   6779  *	specified packet, for NEWQUEUE devices
   6780  */
   6781 static int
   6782 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6783     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6784 {
   6785 	struct mbuf *m0 = txs->txs_mbuf;
   6786 	struct m_tag *mtag;
   6787 	uint32_t vl_len, mssidx, cmdc;
   6788 	struct ether_header *eh;
   6789 	int offset, iphl;
   6790 
   6791 	/*
   6792 	 * XXX It would be nice if the mbuf pkthdr had offset
   6793 	 * fields for the protocol headers.
   6794 	 */
   6795 	*cmdlenp = 0;
   6796 	*fieldsp = 0;
   6797 
   6798 	eh = mtod(m0, struct ether_header *);
   6799 	switch (htons(eh->ether_type)) {
   6800 	case ETHERTYPE_IP:
   6801 	case ETHERTYPE_IPV6:
   6802 		offset = ETHER_HDR_LEN;
   6803 		break;
   6804 
   6805 	case ETHERTYPE_VLAN:
   6806 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6807 		break;
   6808 
   6809 	default:
   6810 		/* Don't support this protocol or encapsulation. */
   6811 		*do_csum = false;
   6812 		return 0;
   6813 	}
   6814 	*do_csum = true;
   6815 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6816 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6817 
   6818 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6819 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6820 
   6821 	if ((m0->m_pkthdr.csum_flags &
   6822 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6823 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6824 	} else {
   6825 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6826 	}
   6827 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6828 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6829 
   6830 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6831 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6832 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6833 		*cmdlenp |= NQTX_CMD_VLE;
   6834 	}
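	/*
	 * At this point vl_len packs, per the advanced context descriptor
	 * layout: the 802.1Q tag (if any), the MAC header length ("offset",
	 * e.g. 14 for an untagged frame) and the IP header length ("iphl",
	 * e.g. 20 for IPv4 without options).
	 */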
   6835 
   6836 	mssidx = 0;
   6837 
   6838 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6839 		int hlen = offset + iphl;
   6840 		int tcp_hlen;
   6841 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6842 
   6843 		if (__predict_false(m0->m_len <
   6844 				    (hlen + sizeof(struct tcphdr)))) {
   6845 			/*
   6846 			 * TCP/IP headers are not in the first mbuf; we need
   6847 			 * to do this the slow and painful way.  Let's just
   6848 			 * hope this doesn't happen very often.
   6849 			 */
   6850 			struct tcphdr th;
   6851 
   6852 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6853 
   6854 			m_copydata(m0, hlen, sizeof(th), &th);
   6855 			if (v4) {
   6856 				struct ip ip;
   6857 
   6858 				m_copydata(m0, offset, sizeof(ip), &ip);
   6859 				ip.ip_len = 0;
   6860 				m_copyback(m0,
   6861 				    offset + offsetof(struct ip, ip_len),
   6862 				    sizeof(ip.ip_len), &ip.ip_len);
   6863 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6864 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6865 			} else {
   6866 				struct ip6_hdr ip6;
   6867 
   6868 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6869 				ip6.ip6_plen = 0;
   6870 				m_copyback(m0,
   6871 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6872 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6873 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6874 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6875 			}
   6876 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6877 			    sizeof(th.th_sum), &th.th_sum);
   6878 
   6879 			tcp_hlen = th.th_off << 2;
   6880 		} else {
   6881 			/*
   6882 			 * TCP/IP headers are in the first mbuf; we can do
   6883 			 * this the easy way.
   6884 			 */
   6885 			struct tcphdr *th;
   6886 
   6887 			if (v4) {
   6888 				struct ip *ip =
   6889 				    (void *)(mtod(m0, char *) + offset);
   6890 				th = (void *)(mtod(m0, char *) + hlen);
   6891 
   6892 				ip->ip_len = 0;
   6893 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6894 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6895 			} else {
   6896 				struct ip6_hdr *ip6 =
   6897 				    (void *)(mtod(m0, char *) + offset);
   6898 				th = (void *)(mtod(m0, char *) + hlen);
   6899 
   6900 				ip6->ip6_plen = 0;
   6901 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6902 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6903 			}
   6904 			tcp_hlen = th->th_off << 2;
   6905 		}
   6906 		hlen += tcp_hlen;
   6907 		*cmdlenp |= NQTX_CMD_TSE;
   6908 
   6909 		if (v4) {
   6910 			WM_Q_EVCNT_INCR(txq, txtso);
   6911 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6912 		} else {
   6913 			WM_Q_EVCNT_INCR(txq, txtso6);
   6914 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6915 		}
   6916 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6917 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6918 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6919 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6920 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6921 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
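		/*
		 * mssidx now packs the TSO parameters: the MSS
		 * (m0->m_pkthdr.segsz, typically 1460 for IPv4 at the
		 * standard Ethernet MTU) and the TCP header length
		 * (tcp_hlen, 20 bytes without options).
		 */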
   6922 	} else {
   6923 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6924 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6925 	}
   6926 
   6927 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6928 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6929 		cmdc |= NQTXC_CMD_IP4;
   6930 	}
   6931 
   6932 	if (m0->m_pkthdr.csum_flags &
   6933 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6934 		WM_Q_EVCNT_INCR(txq, txtusum);
   6935 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6936 			cmdc |= NQTXC_CMD_TCP;
   6937 		} else {
   6938 			cmdc |= NQTXC_CMD_UDP;
   6939 		}
   6940 		cmdc |= NQTXC_CMD_IP4;
   6941 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6942 	}
   6943 	if (m0->m_pkthdr.csum_flags &
   6944 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6945 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6946 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6947 			cmdc |= NQTXC_CMD_TCP;
   6948 		} else {
   6949 			cmdc |= NQTXC_CMD_UDP;
   6950 		}
   6951 		cmdc |= NQTXC_CMD_IP6;
   6952 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6953 	}
   6954 
   6955 	/* Fill in the context descriptor. */
   6956 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6957 	    htole32(vl_len);
   6958 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6959 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6960 	    htole32(cmdc);
   6961 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6962 	    htole32(mssidx);
   6963 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6964 	DPRINTF(WM_DEBUG_TX,
   6965 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6966 	    txq->txq_next, 0, vl_len));
   6967 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6968 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6969 	txs->txs_ndesc++;
   6970 	return 0;
   6971 }
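
/*
 * Note: for an offloaded packet the Tx ring thus carries one advanced
 * context descriptor (built above) followed by one data descriptor per
 * DMA segment; those data descriptors are filled in by
 * wm_nq_send_common_locked() below.
 */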
   6972 
   6973 /*
   6974  * wm_nq_start:		[ifnet interface function]
   6975  *
   6976  *	Start packet transmission on the interface for NEWQUEUE devices
   6977  */
   6978 static void
   6979 wm_nq_start(struct ifnet *ifp)
   6980 {
   6981 	struct wm_softc *sc = ifp->if_softc;
   6982 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6983 
   6984 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6985 
   6986 	/*
   6987 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6988 	 */
   6989 
   6990 	mutex_enter(txq->txq_lock);
   6991 	if (!txq->txq_stopping)
   6992 		wm_nq_start_locked(ifp);
   6993 	mutex_exit(txq->txq_lock);
   6994 }
   6995 
   6996 static void
   6997 wm_nq_start_locked(struct ifnet *ifp)
   6998 {
   6999 	struct wm_softc *sc = ifp->if_softc;
   7000 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7001 
   7002 	wm_nq_send_common_locked(ifp, txq, false);
   7003 }
   7004 
   7005 static int
   7006 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7007 {
   7008 	int qid;
   7009 	struct wm_softc *sc = ifp->if_softc;
   7010 	struct wm_txqueue *txq;
   7011 
   7012 	qid = wm_select_txqueue(ifp, m);
   7013 	txq = &sc->sc_queue[qid].wmq_txq;
   7014 
   7015 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7016 		m_freem(m);
   7017 		WM_Q_EVCNT_INCR(txq, txdrop);
   7018 		return ENOBUFS;
   7019 	}
   7020 
   7021 	/*
   7022 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7023 	 */
   7024 	ifp->if_obytes += m->m_pkthdr.len;
   7025 	if (m->m_flags & M_MCAST)
   7026 		ifp->if_omcasts++;
   7027 
    7028 	/*
    7029 	 * There are two situations in which this mutex_tryenter() can
    7030 	 * fail at run time:
    7031 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7032 	 *     (2) contention with the deferred if_start softint
    7033 	 *         (wm_deferred_start())
    7034 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7035 	 * dequeued by wm_deferred_start(), so it does not get stuck.
    7036 	 * The same holds in case (2), so it does not get stuck either.
    7037 	 */
   7038 	if (mutex_tryenter(txq->txq_lock)) {
   7039 		if (!txq->txq_stopping)
   7040 			wm_nq_transmit_locked(ifp, txq);
   7041 		mutex_exit(txq->txq_lock);
   7042 	}
   7043 
   7044 	return 0;
   7045 }
   7046 
   7047 static void
   7048 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7049 {
   7050 
   7051 	wm_nq_send_common_locked(ifp, txq, true);
   7052 }
   7053 
   7054 static void
   7055 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7056     bool is_transmit)
   7057 {
   7058 	struct wm_softc *sc = ifp->if_softc;
   7059 	struct mbuf *m0;
   7060 	struct m_tag *mtag;
   7061 	struct wm_txsoft *txs;
   7062 	bus_dmamap_t dmamap;
   7063 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7064 	bool do_csum, sent;
   7065 
   7066 	KASSERT(mutex_owned(txq->txq_lock));
   7067 
   7068 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   7069 		return;
   7070 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7071 		return;
   7072 
   7073 	sent = false;
   7074 
   7075 	/*
   7076 	 * Loop through the send queue, setting up transmit descriptors
   7077 	 * until we drain the queue, or use up all available transmit
   7078 	 * descriptors.
   7079 	 */
   7080 	for (;;) {
   7081 		m0 = NULL;
   7082 
   7083 		/* Get a work queue entry. */
   7084 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7085 			wm_txeof(sc, txq);
   7086 			if (txq->txq_sfree == 0) {
   7087 				DPRINTF(WM_DEBUG_TX,
   7088 				    ("%s: TX: no free job descriptors\n",
   7089 					device_xname(sc->sc_dev)));
   7090 				WM_Q_EVCNT_INCR(txq, txsstall);
   7091 				break;
   7092 			}
   7093 		}
   7094 
   7095 		/* Grab a packet off the queue. */
   7096 		if (is_transmit)
   7097 			m0 = pcq_get(txq->txq_interq);
   7098 		else
   7099 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7100 		if (m0 == NULL)
   7101 			break;
   7102 
   7103 		DPRINTF(WM_DEBUG_TX,
   7104 		    ("%s: TX: have packet to transmit: %p\n",
   7105 		    device_xname(sc->sc_dev), m0));
   7106 
   7107 		txs = &txq->txq_soft[txq->txq_snext];
   7108 		dmamap = txs->txs_dmamap;
   7109 
   7110 		/*
   7111 		 * Load the DMA map.  If this fails, the packet either
   7112 		 * didn't fit in the allotted number of segments, or we
   7113 		 * were short on resources.  For the too-many-segments
   7114 		 * case, we simply report an error and drop the packet,
   7115 		 * since we can't sanely copy a jumbo packet to a single
   7116 		 * buffer.
   7117 		 */
   7118 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7119 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7120 		if (error) {
   7121 			if (error == EFBIG) {
   7122 				WM_Q_EVCNT_INCR(txq, txdrop);
   7123 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7124 				    "DMA segments, dropping...\n",
   7125 				    device_xname(sc->sc_dev));
   7126 				wm_dump_mbuf_chain(sc, m0);
   7127 				m_freem(m0);
   7128 				continue;
   7129 			}
   7130 			/* Short on resources, just stop for now. */
   7131 			DPRINTF(WM_DEBUG_TX,
   7132 			    ("%s: TX: dmamap load failed: %d\n",
   7133 			    device_xname(sc->sc_dev), error));
   7134 			break;
   7135 		}
   7136 
   7137 		segs_needed = dmamap->dm_nsegs;
   7138 
   7139 		/*
   7140 		 * Ensure we have enough descriptors free to describe
   7141 		 * the packet.  Note, we always reserve one descriptor
   7142 		 * at the end of the ring due to the semantics of the
   7143 		 * TDT register, plus one more in the event we need
   7144 		 * to load offload context.
   7145 		 */
   7146 		if (segs_needed > txq->txq_free - 2) {
   7147 			/*
   7148 			 * Not enough free descriptors to transmit this
   7149 			 * packet.  We haven't committed anything yet,
   7150 			 * so just unload the DMA map, put the packet
    7151 			 * back on the queue, and punt.  Notify the upper
   7152 			 * layer that there are no more slots left.
   7153 			 */
   7154 			DPRINTF(WM_DEBUG_TX,
   7155 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7156 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7157 			    segs_needed, txq->txq_free - 1));
   7158 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7159 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7160 			WM_Q_EVCNT_INCR(txq, txdstall);
   7161 			break;
   7162 		}
   7163 
   7164 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7165 
   7166 		DPRINTF(WM_DEBUG_TX,
   7167 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7168 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7169 
   7170 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7171 
   7172 		/*
   7173 		 * Store a pointer to the packet so that we can free it
   7174 		 * later.
   7175 		 *
    7176 		 * Initially, we take the number of descriptors the packet
    7177 		 * uses to be the number of DMA segments.  This may be
   7178 		 * incremented by 1 if we do checksum offload (a descriptor
   7179 		 * is used to set the checksum context).
   7180 		 */
   7181 		txs->txs_mbuf = m0;
   7182 		txs->txs_firstdesc = txq->txq_next;
   7183 		txs->txs_ndesc = segs_needed;
   7184 
   7185 		/* Set up offload parameters for this packet. */
   7186 		uint32_t cmdlen, fields, dcmdlen;
   7187 		if (m0->m_pkthdr.csum_flags &
   7188 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7189 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7190 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7191 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7192 			    &do_csum) != 0) {
   7193 				/* Error message already displayed. */
   7194 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7195 				continue;
   7196 			}
   7197 		} else {
   7198 			do_csum = false;
   7199 			cmdlen = 0;
   7200 			fields = 0;
   7201 		}
   7202 
   7203 		/* Sync the DMA map. */
   7204 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7205 		    BUS_DMASYNC_PREWRITE);
   7206 
   7207 		/* Initialize the first transmit descriptor. */
   7208 		nexttx = txq->txq_next;
   7209 		if (!do_csum) {
   7210 			/* setup a legacy descriptor */
   7211 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7212 			    dmamap->dm_segs[0].ds_addr);
   7213 			txq->txq_descs[nexttx].wtx_cmdlen =
   7214 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7215 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7216 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7217 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7218 			    NULL) {
   7219 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7220 				    htole32(WTX_CMD_VLE);
   7221 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7222 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7223 			} else {
    7224 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7225 			}
   7226 			dcmdlen = 0;
   7227 		} else {
   7228 			/* setup an advanced data descriptor */
   7229 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7230 			    htole64(dmamap->dm_segs[0].ds_addr);
   7231 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7232 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7233 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7234 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7235 			    htole32(fields);
   7236 			DPRINTF(WM_DEBUG_TX,
   7237 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7238 			    device_xname(sc->sc_dev), nexttx,
   7239 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7240 			DPRINTF(WM_DEBUG_TX,
   7241 			    ("\t 0x%08x%08x\n", fields,
   7242 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7243 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7244 		}
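		/*
		 * dcmdlen distinguishes the two layouts for the remaining
		 * segment descriptors: 0 for the legacy format, and
		 * NQTX_DTYP_D | NQTX_CMD_DEXT for advanced data
		 * descriptors.
		 */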
   7245 
   7246 		lasttx = nexttx;
   7247 		nexttx = WM_NEXTTX(txq, nexttx);
    7248 		/*
    7249 		 * Fill in the next descriptors.  The legacy and advanced
    7250 		 * formats are the same here.
    7251 		 */
   7252 		for (seg = 1; seg < dmamap->dm_nsegs;
   7253 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7254 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7255 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7256 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7257 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7258 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7259 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7260 			lasttx = nexttx;
   7261 
   7262 			DPRINTF(WM_DEBUG_TX,
   7263 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7264 			     "len %#04zx\n",
   7265 			    device_xname(sc->sc_dev), nexttx,
   7266 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7267 			    dmamap->dm_segs[seg].ds_len));
   7268 		}
   7269 
   7270 		KASSERT(lasttx != -1);
   7271 
   7272 		/*
   7273 		 * Set up the command byte on the last descriptor of
   7274 		 * the packet.  If we're in the interrupt delay window,
   7275 		 * delay the interrupt.
   7276 		 */
   7277 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7278 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7279 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7280 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7281 
   7282 		txs->txs_lastdesc = lasttx;
   7283 
   7284 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7285 		    device_xname(sc->sc_dev),
   7286 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7287 
   7288 		/* Sync the descriptors we're using. */
   7289 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7290 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7291 
   7292 		/* Give the packet to the chip. */
   7293 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7294 		sent = true;
   7295 
   7296 		DPRINTF(WM_DEBUG_TX,
   7297 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7298 
   7299 		DPRINTF(WM_DEBUG_TX,
   7300 		    ("%s: TX: finished transmitting packet, job %d\n",
   7301 		    device_xname(sc->sc_dev), txq->txq_snext));
   7302 
   7303 		/* Advance the tx pointer. */
   7304 		txq->txq_free -= txs->txs_ndesc;
   7305 		txq->txq_next = nexttx;
   7306 
   7307 		txq->txq_sfree--;
   7308 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7309 
   7310 		/* Pass the packet to any BPF listeners. */
   7311 		bpf_mtap(ifp, m0);
   7312 	}
   7313 
   7314 	if (m0 != NULL) {
   7315 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7316 		WM_Q_EVCNT_INCR(txq, txdrop);
   7317 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7318 			__func__));
   7319 		m_freem(m0);
   7320 	}
   7321 
   7322 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7323 		/* No more slots; notify upper layer. */
   7324 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7325 	}
   7326 
   7327 	if (sent) {
   7328 		/* Set a watchdog timer in case the chip flakes out. */
   7329 		ifp->if_timer = 5;
   7330 	}
   7331 }
   7332 
   7333 static void
   7334 wm_deferred_start(struct ifnet *ifp)
   7335 {
   7336 	struct wm_softc *sc = ifp->if_softc;
   7337 	int qid = 0;
   7338 
    7339 	/*
    7340 	 * Try to transmit on all Tx queues.  It might be better to pass
    7341 	 * the relevant txq in somehow and transmit only on that queue.
    7342 	 */
   7343 	for (; qid < sc->sc_nqueues; qid++) {
   7344 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   7345 
    7346 		/*
    7347 		 * We must use mutex_enter(txq->txq_lock) here, not
    7348 		 * mutex_tryenter(txq->txq_lock).
    7349 		 * mutex_tryenter(txq->txq_lock) can fail while this txq's
    7350 		 * txq_stopping flag is being set.  In that case the device
    7351 		 * is beginning to stop, so we must not start any Tx
    7352 		 * processing; yet with mutex_tryenter() we could still go
    7353 		 * on to start Tx processing for sc_queue[qid+1].
    7354 		 */
   7355 		mutex_enter(txq->txq_lock);
   7356 		if (txq->txq_stopping) {
   7357 			mutex_exit(txq->txq_lock);
   7358 			return;
   7359 		}
   7360 
   7361 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7362 			/* XXX needed for ALTQ */
   7363 			if (qid == 0)
   7364 				wm_nq_start_locked(ifp);
   7365 			wm_nq_transmit_locked(ifp, txq);
   7366 		} else {
    7367 			/* XXX needed for ALTQ */
   7368 			if (qid == 0)
   7369 				wm_start_locked(ifp);
   7370 			wm_transmit_locked(ifp, txq);
   7371 		}
   7372 		mutex_exit(txq->txq_lock);
   7373 	}
   7374 }
   7375 
   7376 /* Interrupt */
   7377 
   7378 /*
   7379  * wm_txeof:
   7380  *
   7381  *	Helper; handle transmit interrupts.
   7382  */
   7383 static int
   7384 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7385 {
   7386 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7387 	struct wm_txsoft *txs;
   7388 	bool processed = false;
   7389 	int count = 0;
   7390 	int i;
   7391 	uint8_t status;
   7392 
   7393 	KASSERT(mutex_owned(txq->txq_lock));
   7394 
   7395 	if (txq->txq_stopping)
   7396 		return 0;
   7397 
   7398 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7399 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7400 	else
   7401 		ifp->if_flags &= ~IFF_OACTIVE;
   7402 
   7403 	/*
   7404 	 * Go through the Tx list and free mbufs for those
   7405 	 * frames which have been transmitted.
   7406 	 */
   7407 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7408 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7409 		txs = &txq->txq_soft[i];
   7410 
   7411 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7412 			device_xname(sc->sc_dev), i));
   7413 
   7414 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7415 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7416 
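		/*
		 * Check the DD (descriptor done) bit of the job's last
		 * descriptor; if it is still clear, the packet is still
		 * in flight and we stop here.
		 */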
   7417 		status =
   7418 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7419 		if ((status & WTX_ST_DD) == 0) {
   7420 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7421 			    BUS_DMASYNC_PREREAD);
   7422 			break;
   7423 		}
   7424 
   7425 		processed = true;
   7426 		count++;
   7427 		DPRINTF(WM_DEBUG_TX,
   7428 		    ("%s: TX: job %d done: descs %d..%d\n",
   7429 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7430 		    txs->txs_lastdesc));
   7431 
   7432 		/*
   7433 		 * XXX We should probably be using the statistics
   7434 		 * XXX registers, but I don't know if they exist
   7435 		 * XXX on chips before the i82544.
   7436 		 */
   7437 
   7438 #ifdef WM_EVENT_COUNTERS
   7439 		if (status & WTX_ST_TU)
   7440 			WM_Q_EVCNT_INCR(txq, tu);
   7441 #endif /* WM_EVENT_COUNTERS */
   7442 
   7443 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7444 			ifp->if_oerrors++;
   7445 			if (status & WTX_ST_LC)
   7446 				log(LOG_WARNING, "%s: late collision\n",
   7447 				    device_xname(sc->sc_dev));
   7448 			else if (status & WTX_ST_EC) {
   7449 				ifp->if_collisions += 16;
   7450 				log(LOG_WARNING, "%s: excessive collisions\n",
   7451 				    device_xname(sc->sc_dev));
   7452 			}
   7453 		} else
   7454 			ifp->if_opackets++;
   7455 
   7456 		txq->txq_free += txs->txs_ndesc;
   7457 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7458 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7459 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7460 		m_freem(txs->txs_mbuf);
   7461 		txs->txs_mbuf = NULL;
   7462 	}
   7463 
   7464 	/* Update the dirty transmit buffer pointer. */
   7465 	txq->txq_sdirty = i;
   7466 	DPRINTF(WM_DEBUG_TX,
   7467 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7468 
   7469 	if (count != 0)
   7470 		rnd_add_uint32(&sc->rnd_source, count);
   7471 
   7472 	/*
   7473 	 * If there are no more pending transmissions, cancel the watchdog
   7474 	 * timer.
   7475 	 */
   7476 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7477 		ifp->if_timer = 0;
   7478 
   7479 	return processed;
   7480 }
   7481 
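/*
 * Receive descriptor accessors.  The driver handles three Rx descriptor
 * layouts: the legacy format, the 82574 extended format and the
 * "newqueue" (WM_F_NEWQUEUE, 82575 and newer) advanced format.  These
 * helpers hide the layout differences from wm_rxeof().
 */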
   7482 static inline uint32_t
   7483 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7484 {
   7485 	struct wm_softc *sc = rxq->rxq_sc;
   7486 
   7487 	if (sc->sc_type == WM_T_82574)
   7488 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7489 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7490 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7491 	else
   7492 		return rxq->rxq_descs[idx].wrx_status;
   7493 }
   7494 
   7495 static inline uint32_t
   7496 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7497 {
   7498 	struct wm_softc *sc = rxq->rxq_sc;
   7499 
   7500 	if (sc->sc_type == WM_T_82574)
   7501 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7502 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7503 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7504 	else
   7505 		return rxq->rxq_descs[idx].wrx_errors;
   7506 }
   7507 
   7508 static inline uint16_t
   7509 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7510 {
   7511 	struct wm_softc *sc = rxq->rxq_sc;
   7512 
   7513 	if (sc->sc_type == WM_T_82574)
   7514 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7515 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7516 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7517 	else
   7518 		return rxq->rxq_descs[idx].wrx_special;
   7519 }
   7520 
   7521 static inline int
   7522 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7523 {
   7524 	struct wm_softc *sc = rxq->rxq_sc;
   7525 
   7526 	if (sc->sc_type == WM_T_82574)
   7527 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7528 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7529 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7530 	else
   7531 		return rxq->rxq_descs[idx].wrx_len;
   7532 }
   7533 
   7534 #ifdef WM_DEBUG
   7535 static inline uint32_t
   7536 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7537 {
   7538 	struct wm_softc *sc = rxq->rxq_sc;
   7539 
   7540 	if (sc->sc_type == WM_T_82574)
   7541 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7542 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7543 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7544 	else
   7545 		return 0;
   7546 }
   7547 
   7548 static inline uint8_t
   7549 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7550 {
   7551 	struct wm_softc *sc = rxq->rxq_sc;
   7552 
   7553 	if (sc->sc_type == WM_T_82574)
   7554 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7555 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7556 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7557 	else
   7558 		return 0;
   7559 }
   7560 #endif /* WM_DEBUG */
   7561 
   7562 static inline bool
   7563 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7564     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7565 {
   7566 
   7567 	if (sc->sc_type == WM_T_82574)
   7568 		return (status & ext_bit) != 0;
   7569 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7570 		return (status & nq_bit) != 0;
   7571 	else
   7572 		return (status & legacy_bit) != 0;
   7573 }
   7574 
   7575 static inline bool
   7576 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7577     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7578 {
   7579 
   7580 	if (sc->sc_type == WM_T_82574)
   7581 		return (error & ext_bit) != 0;
   7582 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7583 		return (error & nq_bit) != 0;
   7584 	else
   7585 		return (error & legacy_bit) != 0;
   7586 }
   7587 
   7588 static inline bool
   7589 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7590 {
   7591 
   7592 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7593 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7594 		return true;
   7595 	else
   7596 		return false;
   7597 }
   7598 
   7599 static inline bool
   7600 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7601 {
   7602 	struct wm_softc *sc = rxq->rxq_sc;
   7603 
    7604 	/* XXXX Is there a missing error bit for newqueue? */
   7605 	if (wm_rxdesc_is_set_error(sc, errors,
   7606 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   7607 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   7608 		NQRXC_ERROR_RXE)) {
   7609 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7610 			log(LOG_WARNING, "%s: symbol error\n",
   7611 			    device_xname(sc->sc_dev));
   7612 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7613 			log(LOG_WARNING, "%s: receive sequence error\n",
   7614 			    device_xname(sc->sc_dev));
   7615 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   7616 			log(LOG_WARNING, "%s: CRC error\n",
   7617 			    device_xname(sc->sc_dev));
   7618 		return true;
   7619 	}
   7620 
   7621 	return false;
   7622 }
   7623 
   7624 static inline bool
   7625 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7626 {
   7627 	struct wm_softc *sc = rxq->rxq_sc;
   7628 
   7629 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7630 		NQRXC_STATUS_DD)) {
   7631 		/* We have processed all of the receive descriptors. */
   7632 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7633 		return false;
   7634 	}
   7635 
   7636 	return true;
   7637 }
   7638 
   7639 static inline bool
   7640 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   7641     struct mbuf *m)
   7642 {
   7643 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7644 
   7645 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7646 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7647 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7648 	}
   7649 
   7650 	return true;
   7651 }
   7652 
   7653 static inline void
   7654 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7655     uint32_t errors, struct mbuf *m)
   7656 {
   7657 	struct wm_softc *sc = rxq->rxq_sc;
   7658 
   7659 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7660 		if (wm_rxdesc_is_set_status(sc, status,
   7661 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7662 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7663 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7664 			if (wm_rxdesc_is_set_error(sc, errors,
   7665 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7666 				m->m_pkthdr.csum_flags |=
   7667 					M_CSUM_IPv4_BAD;
   7668 		}
   7669 		if (wm_rxdesc_is_set_status(sc, status,
   7670 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7671 			/*
   7672 			 * Note: we don't know if this was TCP or UDP,
   7673 			 * so we just set both bits, and expect the
   7674 			 * upper layers to deal.
   7675 			 */
   7676 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7677 			m->m_pkthdr.csum_flags |=
   7678 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7679 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7680 			if (wm_rxdesc_is_set_error(sc, errors,
   7681 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7682 				m->m_pkthdr.csum_flags |=
   7683 					M_CSUM_TCP_UDP_BAD;
   7684 		}
   7685 	}
   7686 }
   7687 
   7688 /*
   7689  * wm_rxeof:
   7690  *
   7691  *	Helper; handle receive interrupts.
   7692  */
   7693 static void
   7694 wm_rxeof(struct wm_rxqueue *rxq)
   7695 {
   7696 	struct wm_softc *sc = rxq->rxq_sc;
   7697 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7698 	struct wm_rxsoft *rxs;
   7699 	struct mbuf *m;
   7700 	int i, len;
   7701 	int count = 0;
   7702 	uint32_t status, errors;
   7703 	uint16_t vlantag;
   7704 
   7705 	KASSERT(mutex_owned(rxq->rxq_lock));
   7706 
   7707 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7708 		rxs = &rxq->rxq_soft[i];
   7709 
   7710 		DPRINTF(WM_DEBUG_RX,
   7711 		    ("%s: RX: checking descriptor %d\n",
   7712 		    device_xname(sc->sc_dev), i));
   7713 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7714 
   7715 		status = wm_rxdesc_get_status(rxq, i);
   7716 		errors = wm_rxdesc_get_errors(rxq, i);
   7717 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   7718 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   7719 #ifdef WM_DEBUG
   7720 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   7721 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   7722 #endif
   7723 
   7724 		if (!wm_rxdesc_dd(rxq, i, status))
   7725 			break;
   7726 
   7727 		count++;
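		/*
		 * rxq_discard is set further down when a buffer shortage
		 * forces us to drop an in-progress packet; keep dropping
		 * descriptors until we have seen that packet's EOP.
		 */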
   7728 		if (__predict_false(rxq->rxq_discard)) {
   7729 			DPRINTF(WM_DEBUG_RX,
   7730 			    ("%s: RX: discarding contents of descriptor %d\n",
   7731 			    device_xname(sc->sc_dev), i));
   7732 			wm_init_rxdesc(rxq, i);
   7733 			if (wm_rxdesc_is_eop(rxq, status)) {
   7734 				/* Reset our state. */
   7735 				DPRINTF(WM_DEBUG_RX,
   7736 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7737 				    device_xname(sc->sc_dev)));
   7738 				rxq->rxq_discard = 0;
   7739 			}
   7740 			continue;
   7741 		}
   7742 
   7743 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7744 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7745 
   7746 		m = rxs->rxs_mbuf;
   7747 
   7748 		/*
   7749 		 * Add a new receive buffer to the ring, unless of
   7750 		 * course the length is zero. Treat the latter as a
   7751 		 * failed mapping.
   7752 		 */
   7753 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7754 			/*
   7755 			 * Failed, throw away what we've done so
   7756 			 * far, and discard the rest of the packet.
   7757 			 */
   7758 			ifp->if_ierrors++;
   7759 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7760 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7761 			wm_init_rxdesc(rxq, i);
   7762 			if (!wm_rxdesc_is_eop(rxq, status))
   7763 				rxq->rxq_discard = 1;
   7764 			if (rxq->rxq_head != NULL)
   7765 				m_freem(rxq->rxq_head);
   7766 			WM_RXCHAIN_RESET(rxq);
   7767 			DPRINTF(WM_DEBUG_RX,
   7768 			    ("%s: RX: Rx buffer allocation failed, "
   7769 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7770 			    rxq->rxq_discard ? " (discard)" : ""));
   7771 			continue;
   7772 		}
   7773 
   7774 		m->m_len = len;
   7775 		rxq->rxq_len += len;
   7776 		DPRINTF(WM_DEBUG_RX,
   7777 		    ("%s: RX: buffer at %p len %d\n",
   7778 		    device_xname(sc->sc_dev), m->m_data, len));
   7779 
   7780 		/* If this is not the end of the packet, keep looking. */
   7781 		if (!wm_rxdesc_is_eop(rxq, status)) {
   7782 			WM_RXCHAIN_LINK(rxq, m);
   7783 			DPRINTF(WM_DEBUG_RX,
   7784 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7785 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7786 			continue;
   7787 		}
   7788 
    7789 		/*
    7790 		 * Okay, we have the entire packet now.  Except on the
    7791 		 * I350, I354 and I21[01], the chip is configured to
    7792 		 * include the FCS (not all chips can be configured to
    7793 		 * strip it), so we need to trim it.  We may need to
    7794 		 * adjust the length of the previous mbuf in the chain
    7795 		 * if the current mbuf is too short.  Due to an erratum,
    7796 		 * the RCTL_SECRC bit in the RCTL register is always set
    7797 		 * on the I350, so we don't trim the FCS there.
    7798 		 */
   7799 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7800 		    && (sc->sc_type != WM_T_I210)
   7801 		    && (sc->sc_type != WM_T_I211)) {
   7802 			if (m->m_len < ETHER_CRC_LEN) {
   7803 				rxq->rxq_tail->m_len
   7804 				    -= (ETHER_CRC_LEN - m->m_len);
   7805 				m->m_len = 0;
   7806 			} else
   7807 				m->m_len -= ETHER_CRC_LEN;
   7808 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7809 		} else
   7810 			len = rxq->rxq_len;
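		/*
		 * For example: a minimum-size 64-byte frame received in a
		 * single mbuf ends up with len == 60 once the 4-byte FCS
		 * (ETHER_CRC_LEN) has been trimmed above.
		 */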
   7811 
   7812 		WM_RXCHAIN_LINK(rxq, m);
   7813 
   7814 		*rxq->rxq_tailp = NULL;
   7815 		m = rxq->rxq_head;
   7816 
   7817 		WM_RXCHAIN_RESET(rxq);
   7818 
   7819 		DPRINTF(WM_DEBUG_RX,
   7820 		    ("%s: RX: have entire packet, len -> %d\n",
   7821 		    device_xname(sc->sc_dev), len));
   7822 
   7823 		/* If an error occurred, update stats and drop the packet. */
   7824 		if (wm_rxdesc_has_errors(rxq, errors)) {
   7825 			m_freem(m);
   7826 			continue;
   7827 		}
   7828 
   7829 		/* No errors.  Receive the packet. */
   7830 		m_set_rcvif(m, ifp);
   7831 		m->m_pkthdr.len = len;
    7832 		/*
    7833 		 * TODO
    7834 		 * We should save the rsshash and rsstype in this mbuf.
    7835 		 */
   7836 		DPRINTF(WM_DEBUG_RX,
   7837 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   7838 			device_xname(sc->sc_dev), rsstype, rsshash));
   7839 
   7840 		/*
   7841 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7842 		 * for us.  Associate the tag with the packet.
   7843 		 */
   7844 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   7845 			continue;
   7846 
   7847 		/* Set up checksum info for this packet. */
   7848 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   7849 
   7850 		mutex_exit(rxq->rxq_lock);
   7851 
   7852 		/* Pass it on. */
   7853 		if_percpuq_enqueue(sc->sc_ipq, m);
   7854 
   7855 		mutex_enter(rxq->rxq_lock);
   7856 
   7857 		if (rxq->rxq_stopping)
   7858 			break;
   7859 	}
   7860 
   7861 	/* Update the receive pointer. */
   7862 	rxq->rxq_ptr = i;
   7863 	if (count != 0)
   7864 		rnd_add_uint32(&sc->rnd_source, count);
   7865 
   7866 	DPRINTF(WM_DEBUG_RX,
   7867 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7868 }
   7869 
   7870 /*
   7871  * wm_linkintr_gmii:
   7872  *
   7873  *	Helper; handle link interrupts for GMII.
   7874  */
   7875 static void
   7876 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7877 {
   7878 
   7879 	KASSERT(WM_CORE_LOCKED(sc));
   7880 
   7881 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7882 		__func__));
   7883 
   7884 	if (icr & ICR_LSC) {
   7885 		uint32_t reg;
   7886 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7887 
   7888 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7889 			wm_gig_downshift_workaround_ich8lan(sc);
   7890 
   7891 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7892 			device_xname(sc->sc_dev)));
   7893 		mii_pollstat(&sc->sc_mii);
   7894 		if (sc->sc_type == WM_T_82543) {
   7895 			int miistatus, active;
   7896 
    7897 			/*
    7898 			 * With the 82543, we need to force the MAC's
    7899 			 * speed and duplex to match the PHY's speed
    7900 			 * and duplex configuration.
    7901 			 */
   7902 			miistatus = sc->sc_mii.mii_media_status;
   7903 
   7904 			if (miistatus & IFM_ACTIVE) {
   7905 				active = sc->sc_mii.mii_media_active;
   7906 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7907 				switch (IFM_SUBTYPE(active)) {
   7908 				case IFM_10_T:
   7909 					sc->sc_ctrl |= CTRL_SPEED_10;
   7910 					break;
   7911 				case IFM_100_TX:
   7912 					sc->sc_ctrl |= CTRL_SPEED_100;
   7913 					break;
   7914 				case IFM_1000_T:
   7915 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7916 					break;
   7917 				default:
   7918 					/*
    7919 					 * Fiber?
    7920 					 * Should not get here.
   7921 					 */
   7922 					printf("unknown media (%x)\n", active);
   7923 					break;
   7924 				}
   7925 				if (active & IFM_FDX)
   7926 					sc->sc_ctrl |= CTRL_FD;
   7927 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7928 			}
   7929 		} else if ((sc->sc_type == WM_T_ICH8)
   7930 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7931 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7932 		} else if (sc->sc_type == WM_T_PCH) {
   7933 			wm_k1_gig_workaround_hv(sc,
   7934 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7935 		}
   7936 
   7937 		if ((sc->sc_phytype == WMPHY_82578)
   7938 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7939 			== IFM_1000_T)) {
   7940 
   7941 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7942 				delay(200*1000); /* XXX too big */
   7943 
   7944 				/* Link stall fix for link up */
   7945 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7946 				    HV_MUX_DATA_CTRL,
   7947 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7948 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7949 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7950 				    HV_MUX_DATA_CTRL,
   7951 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7952 			}
   7953 		}
   7954 		/*
   7955 		 * I217 Packet Loss issue:
   7956 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   7957 		 * on power up.
   7958 		 * Set the Beacon Duration for I217 to 8 usec
   7959 		 */
   7960 		if ((sc->sc_type == WM_T_PCH_LPT)
   7961 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7962 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   7963 			reg &= ~FEXTNVM4_BEACON_DURATION;
   7964 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   7965 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   7966 		}
   7967 
   7968 		/* XXX Work-around I218 hang issue */
   7969 		/* e1000_k1_workaround_lpt_lp() */
   7970 
   7971 		if ((sc->sc_type == WM_T_PCH_LPT)
   7972 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7973 			/*
   7974 			 * Set platform power management values for Latency
   7975 			 * Tolerance Reporting (LTR)
   7976 			 */
   7977 			wm_platform_pm_pch_lpt(sc,
   7978 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   7979 				    != 0));
   7980 		}
   7981 
   7982 		/* FEXTNVM6 K1-off workaround */
   7983 		if (sc->sc_type == WM_T_PCH_SPT) {
   7984 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   7985 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   7986 			    & FEXTNVM6_K1_OFF_ENABLE)
   7987 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   7988 			else
   7989 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   7990 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   7991 		}
   7992 	} else if (icr & ICR_RXSEQ) {
   7993 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7994 			device_xname(sc->sc_dev)));
   7995 	}
   7996 }
   7997 
   7998 /*
   7999  * wm_linkintr_tbi:
   8000  *
   8001  *	Helper; handle link interrupts for TBI mode.
   8002  */
   8003 static void
   8004 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8005 {
   8006 	uint32_t status;
   8007 
   8008 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8009 		__func__));
   8010 
   8011 	status = CSR_READ(sc, WMREG_STATUS);
   8012 	if (icr & ICR_LSC) {
   8013 		if (status & STATUS_LU) {
   8014 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8015 			    device_xname(sc->sc_dev),
   8016 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8017 			/*
    8018 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    8019 			 * automatically, so we must re-read CTRL into sc_ctrl.
    8020 			 */
   8021 
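         			/*
         			 * Reprogram the collision distance for the
         			 * new duplex setting, and re-derive the XON
         			 * setting from the negotiated CTRL_TFCE bit.
         			 */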
   8022 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8023 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8024 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8025 			if (status & STATUS_FD)
   8026 				sc->sc_tctl |=
   8027 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8028 			else
   8029 				sc->sc_tctl |=
   8030 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8031 			if (sc->sc_ctrl & CTRL_TFCE)
   8032 				sc->sc_fcrtl |= FCRTL_XONE;
   8033 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8034 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8035 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8036 				      sc->sc_fcrtl);
   8037 			sc->sc_tbi_linkup = 1;
   8038 		} else {
   8039 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8040 			    device_xname(sc->sc_dev)));
   8041 			sc->sc_tbi_linkup = 0;
   8042 		}
   8043 		/* Update LED */
   8044 		wm_tbi_serdes_set_linkled(sc);
   8045 	} else if (icr & ICR_RXSEQ) {
   8046 		DPRINTF(WM_DEBUG_LINK,
   8047 		    ("%s: LINK: Receive sequence error\n",
   8048 		    device_xname(sc->sc_dev)));
   8049 	}
   8050 }
   8051 
   8052 /*
   8053  * wm_linkintr_serdes:
   8054  *
    8055  *	Helper; handle link interrupts for SERDES mode.
   8056  */
   8057 static void
   8058 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8059 {
   8060 	struct mii_data *mii = &sc->sc_mii;
   8061 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8062 	uint32_t pcs_adv, pcs_lpab, reg;
   8063 
   8064 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8065 		__func__));
   8066 
   8067 	if (icr & ICR_LSC) {
   8068 		/* Check PCS */
   8069 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8070 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8071 			mii->mii_media_status |= IFM_ACTIVE;
   8072 			sc->sc_tbi_linkup = 1;
   8073 		} else {
    8074 			mii->mii_media_active |= IFM_NONE;
   8075 			sc->sc_tbi_linkup = 0;
   8076 			wm_tbi_serdes_set_linkled(sc);
   8077 			return;
   8078 		}
   8079 		mii->mii_media_active |= IFM_1000_SX;
   8080 		if ((reg & PCS_LSTS_FDX) != 0)
   8081 			mii->mii_media_active |= IFM_FDX;
   8082 		else
   8083 			mii->mii_media_active |= IFM_HDX;
   8084 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8085 			/* Check flow */
   8086 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8087 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8088 				DPRINTF(WM_DEBUG_LINK,
   8089 				    ("XXX LINKOK but not ACOMP\n"));
   8090 				return;
   8091 			}
   8092 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8093 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8094 			DPRINTF(WM_DEBUG_LINK,
   8095 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8096 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8097 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8098 				mii->mii_media_active |= IFM_FLOW
   8099 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8100 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8101 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8102 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8103 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8104 				mii->mii_media_active |= IFM_FLOW
   8105 				    | IFM_ETH_TXPAUSE;
   8106 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8107 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8108 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8109 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8110 				mii->mii_media_active |= IFM_FLOW
   8111 				    | IFM_ETH_RXPAUSE;
   8112 		}
   8113 		/* Update LED */
   8114 		wm_tbi_serdes_set_linkled(sc);
   8115 	} else {
   8116 		DPRINTF(WM_DEBUG_LINK,
   8117 		    ("%s: LINK: Receive sequence error\n",
   8118 		    device_xname(sc->sc_dev)));
   8119 	}
   8120 }
   8121 
   8122 /*
   8123  * wm_linkintr:
   8124  *
   8125  *	Helper; handle link interrupts.
   8126  */
   8127 static void
   8128 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8129 {
   8130 
   8131 	KASSERT(WM_CORE_LOCKED(sc));
   8132 
   8133 	if (sc->sc_flags & WM_F_HAS_MII)
   8134 		wm_linkintr_gmii(sc, icr);
   8135 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8136 	    && (sc->sc_type >= WM_T_82575))
   8137 		wm_linkintr_serdes(sc, icr);
   8138 	else
   8139 		wm_linkintr_tbi(sc, icr);
   8140 }
   8141 
   8142 /*
   8143  * wm_intr_legacy:
   8144  *
   8145  *	Interrupt service routine for INTx and MSI.
   8146  */
   8147 static int
   8148 wm_intr_legacy(void *arg)
   8149 {
   8150 	struct wm_softc *sc = arg;
   8151 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   8152 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   8153 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8154 	uint32_t icr, rndval = 0;
   8155 	int handled = 0;
   8156 
   8157 	DPRINTF(WM_DEBUG_TX,
   8158 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8159 	while (1 /* CONSTCOND */) {
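         		/* Reading ICR also acknowledges (clears) the asserted causes. */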
   8160 		icr = CSR_READ(sc, WMREG_ICR);
   8161 		if ((icr & sc->sc_icr) == 0)
   8162 			break;
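         		/* Keep the first ICR value seen as an entropy sample. */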
   8163 		if (rndval == 0)
   8164 			rndval = icr;
   8165 
   8166 		mutex_enter(rxq->rxq_lock);
   8167 
   8168 		if (rxq->rxq_stopping) {
   8169 			mutex_exit(rxq->rxq_lock);
   8170 			break;
   8171 		}
   8172 
   8173 		handled = 1;
   8174 
   8175 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8176 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8177 			DPRINTF(WM_DEBUG_RX,
   8178 			    ("%s: RX: got Rx intr 0x%08x\n",
   8179 			    device_xname(sc->sc_dev),
   8180 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8181 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8182 		}
   8183 #endif
   8184 		wm_rxeof(rxq);
   8185 
   8186 		mutex_exit(rxq->rxq_lock);
   8187 		mutex_enter(txq->txq_lock);
   8188 
   8189 		if (txq->txq_stopping) {
   8190 			mutex_exit(txq->txq_lock);
   8191 			break;
   8192 		}
   8193 
   8194 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8195 		if (icr & ICR_TXDW) {
   8196 			DPRINTF(WM_DEBUG_TX,
   8197 			    ("%s: TX: got TXDW interrupt\n",
   8198 			    device_xname(sc->sc_dev)));
   8199 			WM_Q_EVCNT_INCR(txq, txdw);
   8200 		}
   8201 #endif
   8202 		wm_txeof(sc, txq);
   8203 
   8204 		mutex_exit(txq->txq_lock);
   8205 		WM_CORE_LOCK(sc);
   8206 
   8207 		if (sc->sc_core_stopping) {
   8208 			WM_CORE_UNLOCK(sc);
   8209 			break;
   8210 		}
   8211 
   8212 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8213 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8214 			wm_linkintr(sc, icr);
   8215 		}
   8216 
   8217 		WM_CORE_UNLOCK(sc);
   8218 
   8219 		if (icr & ICR_RXO) {
   8220 #if defined(WM_DEBUG)
   8221 			log(LOG_WARNING, "%s: Receive overrun\n",
   8222 			    device_xname(sc->sc_dev));
   8223 #endif /* defined(WM_DEBUG) */
   8224 		}
   8225 	}
   8226 
   8227 	rnd_add_uint32(&sc->rnd_source, rndval);
   8228 
   8229 	if (handled) {
   8230 		/* Try to get more packets going. */
   8231 		if_schedule_deferred_start(ifp);
   8232 	}
   8233 
   8234 	return handled;
   8235 }
   8236 
   8237 static int
   8238 wm_txrxintr_msix(void *arg)
   8239 {
   8240 	struct wm_queue *wmq = arg;
   8241 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8242 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8243 	struct wm_softc *sc = txq->txq_sc;
   8244 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8245 
   8246 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8247 
   8248 	DPRINTF(WM_DEBUG_TX,
   8249 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8250 
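         	/*
         	 * Mask this queue's Tx/Rx interrupt while it is serviced;
         	 * it is unmasked again (IMS/EIMS) on the way out.
         	 */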
    8251 	if (sc->sc_type == WM_T_82574)
    8252 		CSR_WRITE(sc, WMREG_IMC,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
    8253 	else if (sc->sc_type == WM_T_82575)
    8254 		CSR_WRITE(sc, WMREG_EIMC,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
    8255 	else
    8256 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8257 
   8258 	mutex_enter(txq->txq_lock);
   8259 
   8260 	if (txq->txq_stopping) {
   8261 		mutex_exit(txq->txq_lock);
   8262 		return 0;
   8263 	}
   8264 
   8265 	WM_Q_EVCNT_INCR(txq, txdw);
   8266 	wm_txeof(sc, txq);
   8267 
   8268 	/* Try to get more packets going. */
   8269 	if (pcq_peek(txq->txq_interq) != NULL)
   8270 		if_schedule_deferred_start(ifp);
   8271 	/*
    8272 	 * There is still some upper-layer processing that calls
    8273 	 * ifp->if_start() directly, e.g. ALTQ.
   8274 	 */
   8275 	if (wmq->wmq_id == 0)
   8276 		if_schedule_deferred_start(ifp);
   8277 
   8278 	mutex_exit(txq->txq_lock);
   8279 
   8280 	DPRINTF(WM_DEBUG_RX,
   8281 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8282 	mutex_enter(rxq->rxq_lock);
   8283 
   8284 	if (rxq->rxq_stopping) {
   8285 		mutex_exit(rxq->rxq_lock);
   8286 		return 0;
   8287 	}
   8288 
   8289 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8290 	wm_rxeof(rxq);
   8291 	mutex_exit(rxq->rxq_lock);
   8292 
    8293 	if (sc->sc_type == WM_T_82574)
    8294 		CSR_WRITE(sc, WMREG_IMS,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
    8295 	else if (sc->sc_type == WM_T_82575)
    8296 		CSR_WRITE(sc, WMREG_EIMS,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
    8297 	else
    8298 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8299 
   8300 	return 1;
   8301 }
   8302 
   8303 /*
   8304  * wm_linkintr_msix:
   8305  *
   8306  *	Interrupt service routine for link status change for MSI-X.
   8307  */
   8308 static int
   8309 wm_linkintr_msix(void *arg)
   8310 {
   8311 	struct wm_softc *sc = arg;
   8312 	uint32_t reg;
   8313 
   8314 	DPRINTF(WM_DEBUG_LINK,
   8315 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8316 
   8317 	reg = CSR_READ(sc, WMREG_ICR);
   8318 	WM_CORE_LOCK(sc);
   8319 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8320 		goto out;
   8321 
   8322 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8323 	wm_linkintr(sc, ICR_LSC);
   8324 
   8325 out:
   8326 	WM_CORE_UNLOCK(sc);
   8327 
   8328 	if (sc->sc_type == WM_T_82574)
   8329 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8330 	else if (sc->sc_type == WM_T_82575)
   8331 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8332 	else
   8333 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8334 
   8335 	return 1;
   8336 }
   8337 
   8338 /*
   8339  * Media related.
   8340  * GMII, SGMII, TBI (and SERDES)
   8341  */
   8342 
   8343 /* Common */
   8344 
   8345 /*
   8346  * wm_tbi_serdes_set_linkled:
   8347  *
   8348  *	Update the link LED on TBI and SERDES devices.
   8349  */
   8350 static void
   8351 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8352 {
   8353 
   8354 	if (sc->sc_tbi_linkup)
   8355 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8356 	else
   8357 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8358 
   8359 	/* 82540 or newer devices are active low */
   8360 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8361 
   8362 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8363 }
   8364 
   8365 /* GMII related */
   8366 
   8367 /*
   8368  * wm_gmii_reset:
   8369  *
   8370  *	Reset the PHY.
   8371  */
   8372 static void
   8373 wm_gmii_reset(struct wm_softc *sc)
   8374 {
   8375 	uint32_t reg;
   8376 	int rv;
   8377 
   8378 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8379 		device_xname(sc->sc_dev), __func__));
   8380 
   8381 	rv = sc->phy.acquire(sc);
   8382 	if (rv != 0) {
   8383 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8384 		    __func__);
   8385 		return;
   8386 	}
   8387 
   8388 	switch (sc->sc_type) {
   8389 	case WM_T_82542_2_0:
   8390 	case WM_T_82542_2_1:
   8391 		/* null */
   8392 		break;
   8393 	case WM_T_82543:
    8394 		/*
    8395 		 * With the 82543, we need to force the MAC's speed and
    8396 		 * duplex to match the PHY's speed and duplex
    8397 		 * configuration. In addition, we need to perform a
    8398 		 * hardware reset on the PHY to take it out of reset.
    8399 		 */
   8400 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8401 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8402 
   8403 		/* The PHY reset pin is active-low. */
   8404 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8405 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8406 		    CTRL_EXT_SWDPIN(4));
   8407 		reg |= CTRL_EXT_SWDPIO(4);
   8408 
   8409 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8410 		CSR_WRITE_FLUSH(sc);
   8411 		delay(10*1000);
   8412 
   8413 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8414 		CSR_WRITE_FLUSH(sc);
   8415 		delay(150);
   8416 #if 0
   8417 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8418 #endif
   8419 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8420 		break;
   8421 	case WM_T_82544:	/* reset 10000us */
   8422 	case WM_T_82540:
   8423 	case WM_T_82545:
   8424 	case WM_T_82545_3:
   8425 	case WM_T_82546:
   8426 	case WM_T_82546_3:
   8427 	case WM_T_82541:
   8428 	case WM_T_82541_2:
   8429 	case WM_T_82547:
   8430 	case WM_T_82547_2:
   8431 	case WM_T_82571:	/* reset 100us */
   8432 	case WM_T_82572:
   8433 	case WM_T_82573:
   8434 	case WM_T_82574:
   8435 	case WM_T_82575:
   8436 	case WM_T_82576:
   8437 	case WM_T_82580:
   8438 	case WM_T_I350:
   8439 	case WM_T_I354:
   8440 	case WM_T_I210:
   8441 	case WM_T_I211:
   8442 	case WM_T_82583:
   8443 	case WM_T_80003:
   8444 		/* generic reset */
   8445 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8446 		CSR_WRITE_FLUSH(sc);
   8447 		delay(20000);
   8448 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8449 		CSR_WRITE_FLUSH(sc);
   8450 		delay(20000);
   8451 
   8452 		if ((sc->sc_type == WM_T_82541)
   8453 		    || (sc->sc_type == WM_T_82541_2)
   8454 		    || (sc->sc_type == WM_T_82547)
   8455 		    || (sc->sc_type == WM_T_82547_2)) {
    8456 			/* Workarounds for IGP PHYs are done in igp_reset() */
   8457 			/* XXX add code to set LED after phy reset */
   8458 		}
   8459 		break;
   8460 	case WM_T_ICH8:
   8461 	case WM_T_ICH9:
   8462 	case WM_T_ICH10:
   8463 	case WM_T_PCH:
   8464 	case WM_T_PCH2:
   8465 	case WM_T_PCH_LPT:
   8466 	case WM_T_PCH_SPT:
   8467 		/* generic reset */
   8468 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8469 		CSR_WRITE_FLUSH(sc);
   8470 		delay(100);
   8471 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8472 		CSR_WRITE_FLUSH(sc);
   8473 		delay(150);
   8474 		break;
   8475 	default:
   8476 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8477 		    __func__);
   8478 		break;
   8479 	}
   8480 
   8481 	sc->phy.release(sc);
   8482 
   8483 	/* get_cfg_done */
   8484 	wm_get_cfg_done(sc);
   8485 
   8486 	/* extra setup */
   8487 	switch (sc->sc_type) {
   8488 	case WM_T_82542_2_0:
   8489 	case WM_T_82542_2_1:
   8490 	case WM_T_82543:
   8491 	case WM_T_82544:
   8492 	case WM_T_82540:
   8493 	case WM_T_82545:
   8494 	case WM_T_82545_3:
   8495 	case WM_T_82546:
   8496 	case WM_T_82546_3:
   8497 	case WM_T_82541_2:
   8498 	case WM_T_82547_2:
   8499 	case WM_T_82571:
   8500 	case WM_T_82572:
   8501 	case WM_T_82573:
   8502 	case WM_T_82575:
   8503 	case WM_T_82576:
   8504 	case WM_T_82580:
   8505 	case WM_T_I350:
   8506 	case WM_T_I354:
   8507 	case WM_T_I210:
   8508 	case WM_T_I211:
   8509 	case WM_T_80003:
   8510 		/* null */
   8511 		break;
   8512 	case WM_T_82574:
   8513 	case WM_T_82583:
   8514 		wm_lplu_d0_disable(sc);
   8515 		break;
   8516 	case WM_T_82541:
   8517 	case WM_T_82547:
    8518 		/* XXX Actively configure the LED after PHY reset */
   8519 		break;
   8520 	case WM_T_ICH8:
   8521 	case WM_T_ICH9:
   8522 	case WM_T_ICH10:
   8523 	case WM_T_PCH:
   8524 	case WM_T_PCH2:
   8525 	case WM_T_PCH_LPT:
   8526 	case WM_T_PCH_SPT:
    8527 		/* Allow time for h/w to get to a quiescent state after reset */
   8528 		delay(10*1000);
   8529 
   8530 		if (sc->sc_type == WM_T_PCH)
   8531 			wm_hv_phy_workaround_ich8lan(sc);
   8532 
   8533 		if (sc->sc_type == WM_T_PCH2)
   8534 			wm_lv_phy_workaround_ich8lan(sc);
   8535 
   8536 		/* Clear the host wakeup bit after lcd reset */
   8537 		if (sc->sc_type >= WM_T_PCH) {
   8538 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8539 			    BM_PORT_GEN_CFG);
   8540 			reg &= ~BM_WUC_HOST_WU_BIT;
   8541 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8542 			    BM_PORT_GEN_CFG, reg);
   8543 		}
   8544 
   8545 		/*
    8546 		 * XXX Configure the LCD with the extended configuration region
   8547 		 * in NVM
   8548 		 */
   8549 
   8550 		/* Disable D0 LPLU. */
   8551 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8552 			wm_lplu_d0_disable_pch(sc);
   8553 		else
   8554 			wm_lplu_d0_disable(sc);	/* ICH* */
   8555 		break;
   8556 	default:
   8557 		panic("%s: unknown type\n", __func__);
   8558 		break;
   8559 	}
   8560 }
   8561 
    8562 /*
    8563  * Set up sc_phytype and mii_{read|write}reg.
    8564  *
    8565  *  To identify the PHY type, the correct read/write functions must be
    8566  * selected, and selecting them requires the PCI ID or MAC type, since
    8567  * no PHY registers may be accessed yet.
    8568  *
    8569  *  On the first call of this function, the PHY ID is not known yet, so
    8570  * the PCI ID or MAC type is checked. The list of PCI IDs may not be
    8571  * complete, so the result might be incorrect.
    8572  *
    8573  *  On the second call, the PHY OUI and model are used to identify the
    8574  * PHY type. This might still not be perfect because some entries are
    8575  * missing from the comparison, but it is better than the first call.
    8576  *
    8577  *  If the newly detected result differs from the previous assumption,
    8578  * a diagnostic message is printed.
    8579  */
   8580 static void
   8581 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8582     uint16_t phy_model)
   8583 {
   8584 	device_t dev = sc->sc_dev;
   8585 	struct mii_data *mii = &sc->sc_mii;
   8586 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8587 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8588 	mii_readreg_t new_readreg;
   8589 	mii_writereg_t new_writereg;
   8590 
   8591 	if (mii->mii_readreg == NULL) {
   8592 		/*
   8593 		 *  This is the first call of this function. For ICH and PCH
   8594 		 * variants, it's difficult to determine the PHY access method
   8595 		 * by sc_type, so use the PCI product ID for some devices.
   8596 		 */
   8597 
   8598 		switch (sc->sc_pcidevid) {
   8599 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8600 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8601 			/* 82577 */
   8602 			new_phytype = WMPHY_82577;
   8603 			break;
   8604 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8605 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8606 			/* 82578 */
   8607 			new_phytype = WMPHY_82578;
   8608 			break;
   8609 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8610 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8611 			/* 82579 */
   8612 			new_phytype = WMPHY_82579;
   8613 			break;
   8614 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8615 		case PCI_PRODUCT_INTEL_82801I_BM:
   8616 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8617 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8618 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8619 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8620 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8621 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8622 			/* ICH8, 9, 10 with 82567 */
   8623 			new_phytype = WMPHY_BM;
   8624 			break;
   8625 		default:
   8626 			break;
   8627 		}
   8628 	} else {
   8629 		/* It's not the first call. Use PHY OUI and model */
   8630 		switch (phy_oui) {
   8631 		case MII_OUI_ATHEROS: /* XXX ??? */
   8632 			switch (phy_model) {
   8633 			case 0x0004: /* XXX */
   8634 				new_phytype = WMPHY_82578;
   8635 				break;
   8636 			default:
   8637 				break;
   8638 			}
   8639 			break;
   8640 		case MII_OUI_xxMARVELL:
   8641 			switch (phy_model) {
   8642 			case MII_MODEL_xxMARVELL_I210:
   8643 				new_phytype = WMPHY_I210;
   8644 				break;
   8645 			case MII_MODEL_xxMARVELL_E1011:
   8646 			case MII_MODEL_xxMARVELL_E1000_3:
   8647 			case MII_MODEL_xxMARVELL_E1000_5:
   8648 			case MII_MODEL_xxMARVELL_E1112:
   8649 				new_phytype = WMPHY_M88;
   8650 				break;
   8651 			case MII_MODEL_xxMARVELL_E1149:
   8652 				new_phytype = WMPHY_BM;
   8653 				break;
   8654 			case MII_MODEL_xxMARVELL_E1111:
   8655 			case MII_MODEL_xxMARVELL_I347:
   8656 			case MII_MODEL_xxMARVELL_E1512:
   8657 			case MII_MODEL_xxMARVELL_E1340M:
   8658 			case MII_MODEL_xxMARVELL_E1543:
   8659 				new_phytype = WMPHY_M88;
   8660 				break;
   8661 			case MII_MODEL_xxMARVELL_I82563:
   8662 				new_phytype = WMPHY_GG82563;
   8663 				break;
   8664 			default:
   8665 				break;
   8666 			}
   8667 			break;
   8668 		case MII_OUI_INTEL:
   8669 			switch (phy_model) {
   8670 			case MII_MODEL_INTEL_I82577:
   8671 				new_phytype = WMPHY_82577;
   8672 				break;
   8673 			case MII_MODEL_INTEL_I82579:
   8674 				new_phytype = WMPHY_82579;
   8675 				break;
   8676 			case MII_MODEL_INTEL_I217:
   8677 				new_phytype = WMPHY_I217;
   8678 				break;
   8679 			case MII_MODEL_INTEL_I82580:
   8680 			case MII_MODEL_INTEL_I350:
   8681 				new_phytype = WMPHY_82580;
   8682 				break;
   8683 			default:
   8684 				break;
   8685 			}
   8686 			break;
   8687 		case MII_OUI_yyINTEL:
   8688 			switch (phy_model) {
   8689 			case MII_MODEL_yyINTEL_I82562G:
   8690 			case MII_MODEL_yyINTEL_I82562EM:
   8691 			case MII_MODEL_yyINTEL_I82562ET:
   8692 				new_phytype = WMPHY_IFE;
   8693 				break;
   8694 			case MII_MODEL_yyINTEL_IGP01E1000:
   8695 				new_phytype = WMPHY_IGP;
   8696 				break;
   8697 			case MII_MODEL_yyINTEL_I82566:
   8698 				new_phytype = WMPHY_IGP_3;
   8699 				break;
   8700 			default:
   8701 				break;
   8702 			}
   8703 			break;
   8704 		default:
   8705 			break;
   8706 		}
   8707 		if (new_phytype == WMPHY_UNKNOWN)
   8708 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   8709 			    __func__);
   8710 
   8711 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    8712 		    && (sc->sc_phytype != new_phytype)) {
    8713 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    8714 			    " was incorrect. PHY type from PHY ID = %u\n",
   8715 			    sc->sc_phytype, new_phytype);
   8716 		}
   8717 	}
   8718 
   8719 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   8720 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   8721 		/* SGMII */
   8722 		new_readreg = wm_sgmii_readreg;
   8723 		new_writereg = wm_sgmii_writereg;
   8724 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   8725 		/* BM2 (phyaddr == 1) */
   8726 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   8727 		    && (new_phytype != WMPHY_BM)
   8728 		    && (new_phytype != WMPHY_UNKNOWN))
   8729 			doubt_phytype = new_phytype;
   8730 		new_phytype = WMPHY_BM;
   8731 		new_readreg = wm_gmii_bm_readreg;
   8732 		new_writereg = wm_gmii_bm_writereg;
   8733 	} else if (sc->sc_type >= WM_T_PCH) {
   8734 		/* All PCH* use _hv_ */
   8735 		new_readreg = wm_gmii_hv_readreg;
   8736 		new_writereg = wm_gmii_hv_writereg;
   8737 	} else if (sc->sc_type >= WM_T_ICH8) {
   8738 		/* non-82567 ICH8, 9 and 10 */
   8739 		new_readreg = wm_gmii_i82544_readreg;
   8740 		new_writereg = wm_gmii_i82544_writereg;
   8741 	} else if (sc->sc_type >= WM_T_80003) {
   8742 		/* 80003 */
   8743 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   8744 		    && (new_phytype != WMPHY_GG82563)
   8745 		    && (new_phytype != WMPHY_UNKNOWN))
   8746 			doubt_phytype = new_phytype;
   8747 		new_phytype = WMPHY_GG82563;
   8748 		new_readreg = wm_gmii_i80003_readreg;
   8749 		new_writereg = wm_gmii_i80003_writereg;
   8750 	} else if (sc->sc_type >= WM_T_I210) {
   8751 		/* I210 and I211 */
   8752 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   8753 		    && (new_phytype != WMPHY_I210)
   8754 		    && (new_phytype != WMPHY_UNKNOWN))
   8755 			doubt_phytype = new_phytype;
   8756 		new_phytype = WMPHY_I210;
   8757 		new_readreg = wm_gmii_gs40g_readreg;
   8758 		new_writereg = wm_gmii_gs40g_writereg;
   8759 	} else if (sc->sc_type >= WM_T_82580) {
   8760 		/* 82580, I350 and I354 */
   8761 		new_readreg = wm_gmii_82580_readreg;
   8762 		new_writereg = wm_gmii_82580_writereg;
   8763 	} else if (sc->sc_type >= WM_T_82544) {
    8764 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   8765 		new_readreg = wm_gmii_i82544_readreg;
   8766 		new_writereg = wm_gmii_i82544_writereg;
   8767 	} else {
   8768 		new_readreg = wm_gmii_i82543_readreg;
   8769 		new_writereg = wm_gmii_i82543_writereg;
   8770 	}
   8771 
   8772 	if (new_phytype == WMPHY_BM) {
   8773 		/* All BM use _bm_ */
   8774 		new_readreg = wm_gmii_bm_readreg;
   8775 		new_writereg = wm_gmii_bm_writereg;
   8776 	}
   8777 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8778 		/* All PCH* use _hv_ */
   8779 		new_readreg = wm_gmii_hv_readreg;
   8780 		new_writereg = wm_gmii_hv_writereg;
   8781 	}
   8782 
   8783 	/* Diag output */
   8784 	if (doubt_phytype != WMPHY_UNKNOWN)
   8785 		aprint_error_dev(dev, "Assumed new PHY type was "
   8786 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   8787 		    new_phytype);
   8788 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    8789 	    && (sc->sc_phytype != new_phytype))
    8790 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    8791 		    " was incorrect. New PHY type = %u\n",
   8792 		    sc->sc_phytype, new_phytype);
   8793 
   8794 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   8795 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   8796 
   8797 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   8798 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   8799 		    "function was incorrect.\n");
   8800 
   8801 	/* Update now */
   8802 	sc->sc_phytype = new_phytype;
   8803 	mii->mii_readreg = new_readreg;
   8804 	mii->mii_writereg = new_writereg;
   8805 }
   8806 
   8807 /*
   8808  * wm_get_phy_id_82575:
   8809  *
   8810  * Return PHY ID. Return -1 if it failed.
   8811  */
   8812 static int
   8813 wm_get_phy_id_82575(struct wm_softc *sc)
   8814 {
   8815 	uint32_t reg;
   8816 	int phyid = -1;
   8817 
   8818 	/* XXX */
   8819 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8820 		return -1;
   8821 
   8822 	if (wm_sgmii_uses_mdio(sc)) {
   8823 		switch (sc->sc_type) {
   8824 		case WM_T_82575:
   8825 		case WM_T_82576:
   8826 			reg = CSR_READ(sc, WMREG_MDIC);
   8827 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8828 			break;
   8829 		case WM_T_82580:
   8830 		case WM_T_I350:
   8831 		case WM_T_I354:
   8832 		case WM_T_I210:
   8833 		case WM_T_I211:
   8834 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8835 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8836 			break;
   8837 		default:
   8838 			return -1;
   8839 		}
   8840 	}
   8841 
   8842 	return phyid;
   8843 }
   8844 
   8845 
   8846 /*
   8847  * wm_gmii_mediainit:
   8848  *
   8849  *	Initialize media for use on 1000BASE-T devices.
   8850  */
   8851 static void
   8852 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8853 {
   8854 	device_t dev = sc->sc_dev;
   8855 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8856 	struct mii_data *mii = &sc->sc_mii;
   8857 	uint32_t reg;
   8858 
   8859 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8860 		device_xname(sc->sc_dev), __func__));
   8861 
   8862 	/* We have GMII. */
   8863 	sc->sc_flags |= WM_F_HAS_MII;
   8864 
   8865 	if (sc->sc_type == WM_T_80003)
    8866 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   8867 	else
   8868 		sc->sc_tipg = TIPG_1000T_DFLT;
   8869 
   8870 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8871 	if ((sc->sc_type == WM_T_82580)
   8872 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8873 	    || (sc->sc_type == WM_T_I211)) {
   8874 		reg = CSR_READ(sc, WMREG_PHPM);
   8875 		reg &= ~PHPM_GO_LINK_D;
   8876 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8877 	}
   8878 
   8879 	/*
   8880 	 * Let the chip set speed/duplex on its own based on
   8881 	 * signals from the PHY.
   8882 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8883 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8884 	 */
   8885 	sc->sc_ctrl |= CTRL_SLU;
   8886 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8887 
   8888 	/* Initialize our media structures and probe the GMII. */
   8889 	mii->mii_ifp = ifp;
   8890 
   8891 	/*
    8892 	 * The first call of wm_gmii_setup_phytype. The result might be
   8893 	 * incorrect.
   8894 	 */
   8895 	wm_gmii_setup_phytype(sc, 0, 0);
   8896 
   8897 	mii->mii_statchg = wm_gmii_statchg;
   8898 
   8899 	/* get PHY control from SMBus to PCIe */
   8900 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   8901 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   8902 		wm_smbustopci(sc);
   8903 
   8904 	wm_gmii_reset(sc);
   8905 
   8906 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8907 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8908 	    wm_gmii_mediastatus);
   8909 
   8910 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8911 	    || (sc->sc_type == WM_T_82580)
   8912 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8913 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8914 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8915 			/* Attach only one port */
   8916 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8917 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8918 		} else {
   8919 			int i, id;
   8920 			uint32_t ctrl_ext;
   8921 
   8922 			id = wm_get_phy_id_82575(sc);
   8923 			if (id != -1) {
   8924 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8925 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8926 			}
   8927 			if ((id == -1)
   8928 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8929 				/* Power on sgmii phy if it is disabled */
   8930 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8931 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    8932 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   8933 				CSR_WRITE_FLUSH(sc);
   8934 				delay(300*1000); /* XXX too long */
   8935 
    8936 				/* Try PHY addresses 1 through 7 */
   8937 				for (i = 1; i < 8; i++)
   8938 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8939 					    0xffffffff, i, MII_OFFSET_ANY,
   8940 					    MIIF_DOPAUSE);
   8941 
    8942 				/* Restore the previous SFP cage power state */
   8943 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8944 			}
   8945 		}
   8946 	} else {
   8947 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8948 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8949 	}
   8950 
   8951 	/*
   8952 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8953 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8954 	 */
   8955 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8956 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8957 		wm_set_mdio_slow_mode_hv(sc);
   8958 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8959 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8960 	}
   8961 
   8962 	/*
   8963 	 * (For ICH8 variants)
   8964 	 * If PHY detection failed, use BM's r/w function and retry.
   8965 	 */
   8966 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8967 		/* if failed, retry with *_bm_* */
   8968 		aprint_verbose_dev(dev, "Assumed PHY access function "
   8969 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   8970 		    sc->sc_phytype);
   8971 		sc->sc_phytype = WMPHY_BM;
   8972 		mii->mii_readreg = wm_gmii_bm_readreg;
   8973 		mii->mii_writereg = wm_gmii_bm_writereg;
   8974 
   8975 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8976 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8977 	}
   8978 
   8979 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8980 		/* No PHY was found */
   8981 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8982 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8983 		sc->sc_phytype = WMPHY_NONE;
   8984 	} else {
   8985 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   8986 
   8987 		/*
    8988 		 * A PHY was found.  Check the PHY type again via the
    8989 		 * second call of wm_gmii_setup_phytype.
   8990 		 */
   8991 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   8992 		    child->mii_mpd_model);
   8993 
   8994 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8995 	}
   8996 }
   8997 
   8998 /*
   8999  * wm_gmii_mediachange:	[ifmedia interface function]
   9000  *
   9001  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9002  */
   9003 static int
   9004 wm_gmii_mediachange(struct ifnet *ifp)
   9005 {
   9006 	struct wm_softc *sc = ifp->if_softc;
   9007 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9008 	int rc;
   9009 
   9010 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9011 		device_xname(sc->sc_dev), __func__));
   9012 	if ((ifp->if_flags & IFF_UP) == 0)
   9013 		return 0;
   9014 
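         	/*
         	 * For autonegotiated media (or any MAC newer than the 82543),
         	 * let the MAC take speed/duplex from the PHY; otherwise force
         	 * them to match the selected media.
         	 */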
   9015 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9016 	sc->sc_ctrl |= CTRL_SLU;
   9017 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9018 	    || (sc->sc_type > WM_T_82543)) {
   9019 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9020 	} else {
   9021 		sc->sc_ctrl &= ~CTRL_ASDE;
   9022 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9023 		if (ife->ifm_media & IFM_FDX)
   9024 			sc->sc_ctrl |= CTRL_FD;
   9025 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9026 		case IFM_10_T:
   9027 			sc->sc_ctrl |= CTRL_SPEED_10;
   9028 			break;
   9029 		case IFM_100_TX:
   9030 			sc->sc_ctrl |= CTRL_SPEED_100;
   9031 			break;
   9032 		case IFM_1000_T:
   9033 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9034 			break;
   9035 		default:
   9036 			panic("wm_gmii_mediachange: bad media 0x%x",
   9037 			    ife->ifm_media);
   9038 		}
   9039 	}
   9040 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9041 	if (sc->sc_type <= WM_T_82543)
   9042 		wm_gmii_reset(sc);
   9043 
   9044 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9045 		return 0;
   9046 	return rc;
   9047 }
   9048 
   9049 /*
   9050  * wm_gmii_mediastatus:	[ifmedia interface function]
   9051  *
   9052  *	Get the current interface media status on a 1000BASE-T device.
   9053  */
   9054 static void
   9055 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9056 {
   9057 	struct wm_softc *sc = ifp->if_softc;
   9058 
   9059 	ether_mediastatus(ifp, ifmr);
   9060 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9061 	    | sc->sc_flowflags;
   9062 }
   9063 
   9064 #define	MDI_IO		CTRL_SWDPIN(2)
   9065 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9066 #define	MDI_CLK		CTRL_SWDPIN(3)
   9067 
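         /*
          * On the 82543 the MII management interface is bit-banged through
          * software-controlled pins in the CTRL register: MDI_IO carries the
          * data, MDI_CLK the clock and MDI_DIR the data direction.  The two
          * helpers below shift a command frame out to, and a result in from,
          * the PHY one bit at a time.
          */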
   9068 static void
   9069 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9070 {
   9071 	uint32_t i, v;
   9072 
   9073 	v = CSR_READ(sc, WMREG_CTRL);
   9074 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9075 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9076 
   9077 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9078 		if (data & i)
   9079 			v |= MDI_IO;
   9080 		else
   9081 			v &= ~MDI_IO;
   9082 		CSR_WRITE(sc, WMREG_CTRL, v);
   9083 		CSR_WRITE_FLUSH(sc);
   9084 		delay(10);
   9085 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9086 		CSR_WRITE_FLUSH(sc);
   9087 		delay(10);
   9088 		CSR_WRITE(sc, WMREG_CTRL, v);
   9089 		CSR_WRITE_FLUSH(sc);
   9090 		delay(10);
   9091 	}
   9092 }
   9093 
   9094 static uint32_t
   9095 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9096 {
   9097 	uint32_t v, i, data = 0;
   9098 
   9099 	v = CSR_READ(sc, WMREG_CTRL);
   9100 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9101 	v |= CTRL_SWDPIO(3);
   9102 
   9103 	CSR_WRITE(sc, WMREG_CTRL, v);
   9104 	CSR_WRITE_FLUSH(sc);
   9105 	delay(10);
   9106 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9107 	CSR_WRITE_FLUSH(sc);
   9108 	delay(10);
   9109 	CSR_WRITE(sc, WMREG_CTRL, v);
   9110 	CSR_WRITE_FLUSH(sc);
   9111 	delay(10);
   9112 
   9113 	for (i = 0; i < 16; i++) {
   9114 		data <<= 1;
   9115 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9116 		CSR_WRITE_FLUSH(sc);
   9117 		delay(10);
   9118 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9119 			data |= 1;
   9120 		CSR_WRITE(sc, WMREG_CTRL, v);
   9121 		CSR_WRITE_FLUSH(sc);
   9122 		delay(10);
   9123 	}
   9124 
   9125 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9126 	CSR_WRITE_FLUSH(sc);
   9127 	delay(10);
   9128 	CSR_WRITE(sc, WMREG_CTRL, v);
   9129 	CSR_WRITE_FLUSH(sc);
   9130 	delay(10);
   9131 
   9132 	return data;
   9133 }
   9134 
   9135 #undef MDI_IO
   9136 #undef MDI_DIR
   9137 #undef MDI_CLK
   9138 
   9139 /*
   9140  * wm_gmii_i82543_readreg:	[mii interface function]
   9141  *
   9142  *	Read a PHY register on the GMII (i82543 version).
   9143  */
   9144 static int
   9145 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9146 {
   9147 	struct wm_softc *sc = device_private(self);
   9148 	int rv;
   9149 
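         	/* 32 one bits of preamble, then the 14-bit read command frame */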
   9150 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9151 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9152 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9153 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9154 
   9155 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9156 	    device_xname(sc->sc_dev), phy, reg, rv));
   9157 
   9158 	return rv;
   9159 }
   9160 
   9161 /*
   9162  * wm_gmii_i82543_writereg:	[mii interface function]
   9163  *
   9164  *	Write a PHY register on the GMII (i82543 version).
   9165  */
   9166 static void
   9167 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9168 {
   9169 	struct wm_softc *sc = device_private(self);
   9170 
   9171 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9172 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9173 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9174 	    (MII_COMMAND_START << 30), 32);
   9175 }
   9176 
   9177 /*
   9178  * wm_gmii_mdic_readreg:	[mii interface function]
   9179  *
   9180  *	Read a PHY register on the GMII.
   9181  */
   9182 static int
   9183 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9184 {
   9185 	struct wm_softc *sc = device_private(self);
   9186 	uint32_t mdic = 0;
   9187 	int i, rv;
   9188 
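         	/* Kick off the read, then poll for MDIC_READY (50us per try) */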
   9189 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9190 	    MDIC_REGADD(reg));
   9191 
   9192 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9193 		mdic = CSR_READ(sc, WMREG_MDIC);
   9194 		if (mdic & MDIC_READY)
   9195 			break;
   9196 		delay(50);
   9197 	}
   9198 
   9199 	if ((mdic & MDIC_READY) == 0) {
   9200 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9201 		    device_xname(sc->sc_dev), phy, reg);
   9202 		rv = 0;
   9203 	} else if (mdic & MDIC_E) {
   9204 #if 0 /* This is normal if no PHY is present. */
   9205 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9206 		    device_xname(sc->sc_dev), phy, reg);
   9207 #endif
   9208 		rv = 0;
   9209 	} else {
   9210 		rv = MDIC_DATA(mdic);
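         		/* All-ones data usually means no PHY at this address */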
   9211 		if (rv == 0xffff)
   9212 			rv = 0;
   9213 	}
   9214 
   9215 	return rv;
   9216 }
   9217 
   9218 /*
   9219  * wm_gmii_mdic_writereg:	[mii interface function]
   9220  *
   9221  *	Write a PHY register on the GMII.
   9222  */
   9223 static void
   9224 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9225 {
   9226 	struct wm_softc *sc = device_private(self);
   9227 	uint32_t mdic = 0;
   9228 	int i;
   9229 
   9230 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9231 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9232 
   9233 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9234 		mdic = CSR_READ(sc, WMREG_MDIC);
   9235 		if (mdic & MDIC_READY)
   9236 			break;
   9237 		delay(50);
   9238 	}
   9239 
   9240 	if ((mdic & MDIC_READY) == 0)
   9241 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9242 		    device_xname(sc->sc_dev), phy, reg);
   9243 	else if (mdic & MDIC_E)
   9244 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9245 		    device_xname(sc->sc_dev), phy, reg);
   9246 }
   9247 
   9248 /*
   9249  * wm_gmii_i82544_readreg:	[mii interface function]
   9250  *
   9251  *	Read a PHY register on the GMII.
   9252  */
   9253 static int
   9254 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9255 {
   9256 	struct wm_softc *sc = device_private(self);
   9257 	int rv;
   9258 
   9259 	if (sc->phy.acquire(sc)) {
   9260 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9261 		    __func__);
   9262 		return 0;
   9263 	}
   9264 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9265 	sc->phy.release(sc);
   9266 
   9267 	return rv;
   9268 }
   9269 
   9270 /*
   9271  * wm_gmii_i82544_writereg:	[mii interface function]
   9272  *
   9273  *	Write a PHY register on the GMII.
   9274  */
   9275 static void
   9276 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9277 {
   9278 	struct wm_softc *sc = device_private(self);
   9279 
   9280 	if (sc->phy.acquire(sc)) {
    9281 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    9282 		    __func__);
         		return;
    9283 	}
   9284 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9285 	sc->phy.release(sc);
   9286 }
   9287 
   9288 /*
   9289  * wm_gmii_i80003_readreg:	[mii interface function]
   9290  *
    9291  *	Read a PHY register on the Kumeran interface (80003).
    9292  * This could be handled by the PHY layer if we didn't have to lock the
    9293  * resource ...
   9294  */
   9295 static int
   9296 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9297 {
   9298 	struct wm_softc *sc = device_private(self);
   9299 	int rv;
   9300 
   9301 	if (phy != 1) /* only one PHY on kumeran bus */
   9302 		return 0;
   9303 
   9304 	if (sc->phy.acquire(sc)) {
   9305 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9306 		    __func__);
   9307 		return 0;
   9308 	}
   9309 
   9310 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9311 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9312 		    reg >> GG82563_PAGE_SHIFT);
   9313 	} else {
   9314 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9315 		    reg >> GG82563_PAGE_SHIFT);
   9316 	}
    9317 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9318 	delay(200);
   9319 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9320 	delay(200);
   9321 	sc->phy.release(sc);
   9322 
   9323 	return rv;
   9324 }
   9325 
   9326 /*
   9327  * wm_gmii_i80003_writereg:	[mii interface function]
   9328  *
    9329  *	Write a PHY register on the Kumeran interface (80003).
    9330  * This could be handled by the PHY layer if we didn't have to lock the
    9331  * resource ...
   9332  */
   9333 static void
   9334 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9335 {
   9336 	struct wm_softc *sc = device_private(self);
   9337 
   9338 	if (phy != 1) /* only one PHY on kumeran bus */
   9339 		return;
   9340 
   9341 	if (sc->phy.acquire(sc)) {
   9342 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9343 		    __func__);
   9344 		return;
   9345 	}
   9346 
   9347 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9348 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9349 		    reg >> GG82563_PAGE_SHIFT);
   9350 	} else {
   9351 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9352 		    reg >> GG82563_PAGE_SHIFT);
   9353 	}
    9354 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9355 	delay(200);
   9356 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9357 	delay(200);
   9358 
   9359 	sc->phy.release(sc);
   9360 }
   9361 
   9362 /*
   9363  * wm_gmii_bm_readreg:	[mii interface function]
   9364  *
    9365  *	Read a PHY register on a BM PHY (82567 variants, 82574, 82583).
    9366  * This could be handled by the PHY layer if we didn't have to lock the
    9367  * resource ...
   9368  */
   9369 static int
   9370 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9371 {
   9372 	struct wm_softc *sc = device_private(self);
   9373 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9374 	uint16_t val;
   9375 	int rv;
   9376 
   9377 	if (sc->phy.acquire(sc)) {
   9378 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9379 		    __func__);
   9380 		return 0;
   9381 	}
   9382 
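         	/*
         	 * On BM PHYs (except 82574/82583), registers on pages >= 768
         	 * and a few special registers (page 0 reg 25, and reg 31) are
         	 * only reachable at PHY address 1.
         	 */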
   9383 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9384 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9385 		    || (reg == 31)) ? 1 : phy;
   9386 	/* Page 800 works differently than the rest so it has its own func */
   9387 	if (page == BM_WUC_PAGE) {
   9388 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9389 		rv = val;
   9390 		goto release;
   9391 	}
   9392 
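         	/*
         	 * Registers above the multi-page boundary need the page
         	 * selected first; PHY address 1 (except on 82574/82583) uses
         	 * the IGP page select register, the rest use the BM one.
         	 */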
   9393 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9394 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9395 		    && (sc->sc_type != WM_T_82583))
   9396 			wm_gmii_mdic_writereg(self, phy,
   9397 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9398 		else
   9399 			wm_gmii_mdic_writereg(self, phy,
   9400 			    BME1000_PHY_PAGE_SELECT, page);
   9401 	}
   9402 
   9403 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9404 
   9405 release:
   9406 	sc->phy.release(sc);
   9407 	return rv;
   9408 }
   9409 
   9410 /*
   9411  * wm_gmii_bm_writereg:	[mii interface function]
   9412  *
    9413  *	Write a PHY register on a BM PHY (82567 variants, 82574, 82583).
    9414  * This could be handled by the PHY layer if we didn't have to lock the
    9415  * resource ...
   9416  */
   9417 static void
   9418 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9419 {
   9420 	struct wm_softc *sc = device_private(self);
   9421 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9422 
   9423 	if (sc->phy.acquire(sc)) {
   9424 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9425 		    __func__);
   9426 		return;
   9427 	}
   9428 
   9429 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9430 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9431 		    || (reg == 31)) ? 1 : phy;
   9432 	/* Page 800 works differently than the rest so it has its own func */
   9433 	if (page == BM_WUC_PAGE) {
   9434 		uint16_t tmp;
   9435 
   9436 		tmp = val;
   9437 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9438 		goto release;
   9439 	}
   9440 
   9441 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9442 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9443 		    && (sc->sc_type != WM_T_82583))
   9444 			wm_gmii_mdic_writereg(self, phy,
   9445 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9446 		else
   9447 			wm_gmii_mdic_writereg(self, phy,
   9448 			    BME1000_PHY_PAGE_SELECT, page);
   9449 	}
   9450 
   9451 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9452 
   9453 release:
   9454 	sc->phy.release(sc);
   9455 }
   9456 
   9457 static void
   9458 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9459 {
   9460 	struct wm_softc *sc = device_private(self);
   9461 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9462 	uint16_t wuce, reg;
   9463 
   9464 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9465 		device_xname(sc->sc_dev), __func__));
   9466 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9467 	if (sc->sc_type == WM_T_PCH) {
    9468 		/* XXX The e1000 driver does nothing here... why? */
   9469 	}
   9470 
   9471 	/*
   9472 	 * 1) Enable PHY wakeup register first.
   9473 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9474 	 */
   9475 
   9476 	/* Set page 769 */
   9477 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9478 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9479 
   9480 	/* Read WUCE and save it */
   9481 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9482 
   9483 	reg = wuce | BM_WUC_ENABLE_BIT;
   9484 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9485 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9486 
   9487 	/* Select page 800 */
   9488 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9489 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9490 
   9491 	/*
   9492 	 * 2) Access PHY wakeup register.
   9493 	 * See e1000_access_phy_wakeup_reg_bm.
   9494 	 */
   9495 
   9496 	/* Write page 800 */
   9497 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9498 
   9499 	if (rd)
   9500 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9501 	else
   9502 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9503 
   9504 	/*
   9505 	 * 3) Disable PHY wakeup register.
   9506 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9507 	 */
   9508 	/* Set page 769 */
   9509 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9510 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9511 
   9512 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9513 }
   9514 
   9515 /*
   9516  * wm_gmii_hv_readreg:	[mii interface function]
   9517  *
    9518  *	Read a PHY register on an HV PHY (PCH and newer).
    9519  * This could be handled by the PHY layer if we didn't have to lock the
    9520  * resource ...
   9521  */
   9522 static int
   9523 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9524 {
   9525 	struct wm_softc *sc = device_private(self);
   9526 	int rv;
   9527 
   9528 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9529 		device_xname(sc->sc_dev), __func__));
   9530 	if (sc->phy.acquire(sc)) {
   9531 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9532 		    __func__);
   9533 		return 0;
   9534 	}
   9535 
   9536 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9537 	sc->phy.release(sc);
   9538 	return rv;
   9539 }
   9540 
   9541 static int
   9542 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9543 {
   9544 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9545 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9546 	uint16_t val;
   9547 	int rv;
   9548 
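         	/* Pages >= HV_INTC_FC_PAGE_START are only reachable at PHY addr 1 */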
   9549 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9550 
   9551 	/* Page 800 works differently than the rest so it has its own func */
   9552 	if (page == BM_WUC_PAGE) {
   9553 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9554 		return val;
   9555 	}
   9556 
   9557 	/*
    9558 	 * Pages lower than 768 work differently than the rest, so they
    9559 	 * would need their own function, which is not implemented yet.
   9560 	 */
   9561 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9562 		printf("gmii_hv_readreg!!!\n");
   9563 		return 0;
   9564 	}
   9565 
   9566 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9567 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9568 		    page << BME1000_PAGE_SHIFT);
   9569 	}
   9570 
   9571 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9572 	return rv;
   9573 }
   9574 
   9575 /*
   9576  * wm_gmii_hv_writereg:	[mii interface function]
   9577  *
    9578  *	Write a PHY register on an HV PHY (PCH and newer).
    9579  * This could be handled by the PHY layer if we didn't have to lock the
    9580  * resource ...
   9581  */
   9582 static void
   9583 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9584 {
   9585 	struct wm_softc *sc = device_private(self);
   9586 
   9587 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9588 		device_xname(sc->sc_dev), __func__));
   9589 
   9590 	if (sc->phy.acquire(sc)) {
   9591 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9592 		    __func__);
   9593 		return;
   9594 	}
   9595 
   9596 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9597 	sc->phy.release(sc);
   9598 }
   9599 
   9600 static void
   9601 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9602 {
   9603 	struct wm_softc *sc = device_private(self);
   9604 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9605 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9606 
   9607 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9608 
   9609 	/* Page 800 works differently than the rest so it has its own func */
   9610 	if (page == BM_WUC_PAGE) {
   9611 		uint16_t tmp;
   9612 
   9613 		tmp = val;
   9614 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9615 		return;
   9616 	}
   9617 
   9618 	/*
    9619 	 * Pages lower than 768 work differently than the rest, so they
    9620 	 * would need their own function, which is not implemented yet.
   9621 	 */
   9622 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9623 		printf("gmii_hv_writereg!!!\n");
   9624 		return;
   9625 	}
   9626 
   9627 	{
   9628 		/*
   9629 		 * XXX Workaround MDIO accesses being disabled after entering
   9630 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9631 		 * register is set)
   9632 		 */
   9633 		if (sc->sc_phytype == WMPHY_82578) {
   9634 			struct mii_softc *child;
   9635 
   9636 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9637 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9638 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9639 			    && ((val & (1 << 11)) != 0)) {
   9640 				printf("XXX need workaround\n");
   9641 			}
   9642 		}
   9643 
   9644 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9645 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9646 			    page << BME1000_PAGE_SHIFT);
   9647 		}
   9648 	}
   9649 
   9650 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9651 }
   9652 
   9653 /*
   9654  * wm_gmii_82580_readreg:	[mii interface function]
   9655  *
   9656  *	Read a PHY register on the 82580 and I350.
   9657  * This could be handled by the PHY layer if we didn't have to lock the
    9658  * resource ...
   9659  */
   9660 static int
   9661 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9662 {
   9663 	struct wm_softc *sc = device_private(self);
   9664 	int rv;
   9665 
   9666 	if (sc->phy.acquire(sc) != 0) {
   9667 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9668 		    __func__);
   9669 		return 0;
   9670 	}
   9671 
   9672 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9673 
   9674 	sc->phy.release(sc);
   9675 	return rv;
   9676 }
   9677 
   9678 /*
   9679  * wm_gmii_82580_writereg:	[mii interface function]
   9680  *
   9681  *	Write a PHY register on the 82580 and I350.
   9682  * This could be handled by the PHY layer if we didn't have to lock the
    9683  * resource ...
   9684  */
   9685 static void
   9686 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9687 {
   9688 	struct wm_softc *sc = device_private(self);
   9689 
   9690 	if (sc->phy.acquire(sc) != 0) {
   9691 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9692 		    __func__);
   9693 		return;
   9694 	}
   9695 
   9696 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9697 
   9698 	sc->phy.release(sc);
   9699 }
   9700 
   9701 /*
   9702  * wm_gmii_gs40g_readreg:	[mii interface function]
   9703  *
    9704  *	Read a PHY register on the I210 and I211.
    9705  * This could be handled by the PHY layer if we didn't have to lock the
    9706  * resource ...
   9707  */
   9708 static int
   9709 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9710 {
   9711 	struct wm_softc *sc = device_private(self);
   9712 	int page, offset;
   9713 	int rv;
   9714 
   9715 	/* Acquire semaphore */
   9716 	if (sc->phy.acquire(sc)) {
   9717 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9718 		    __func__);
   9719 		return 0;
   9720 	}
   9721 
   9722 	/* Page select */
   9723 	page = reg >> GS40G_PAGE_SHIFT;
   9724 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9725 
   9726 	/* Read reg */
   9727 	offset = reg & GS40G_OFFSET_MASK;
   9728 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9729 
   9730 	sc->phy.release(sc);
   9731 	return rv;
   9732 }
   9733 
   9734 /*
   9735  * wm_gmii_gs40g_writereg:	[mii interface function]
   9736  *
   9737  *	Write a PHY register on the I210 and I211.
   9738  * This could be handled by the PHY layer if we didn't have to lock the
    9739  * resource ...
   9740  */
   9741 static void
   9742 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9743 {
   9744 	struct wm_softc *sc = device_private(self);
   9745 	int page, offset;
   9746 
   9747 	/* Acquire semaphore */
   9748 	if (sc->phy.acquire(sc)) {
   9749 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9750 		    __func__);
   9751 		return;
   9752 	}
   9753 
   9754 	/* Page select */
   9755 	page = reg >> GS40G_PAGE_SHIFT;
   9756 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9757 
   9758 	/* Write reg */
   9759 	offset = reg & GS40G_OFFSET_MASK;
   9760 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9761 
   9762 	/* Release semaphore */
   9763 	sc->phy.release(sc);
   9764 }
   9765 
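/*
 * Minimal sketch of how a caller would build the combined register argument
 * decoded by the two GS40G accessors above (kept under #if 0, so it is never
 * compiled; the "page" and "offset" values are hypothetical):
 */
#if 0
	int reg = (page << GS40G_PAGE_SHIFT) | (offset & GS40G_OFFSET_MASK);
	int val = wm_gmii_gs40g_readreg(self, phy, reg);
#endif
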
   9766 /*
   9767  * wm_gmii_statchg:	[mii interface function]
   9768  *
   9769  *	Callback from MII layer when media changes.
   9770  */
   9771 static void
   9772 wm_gmii_statchg(struct ifnet *ifp)
   9773 {
   9774 	struct wm_softc *sc = ifp->if_softc;
   9775 	struct mii_data *mii = &sc->sc_mii;
   9776 
   9777 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9778 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9779 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9780 
   9781 	/*
   9782 	 * Get flow control negotiation result.
   9783 	 */
   9784 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9785 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9786 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9787 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9788 	}
   9789 
   9790 	if (sc->sc_flowflags & IFM_FLOW) {
   9791 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9792 			sc->sc_ctrl |= CTRL_TFCE;
   9793 			sc->sc_fcrtl |= FCRTL_XONE;
   9794 		}
   9795 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9796 			sc->sc_ctrl |= CTRL_RFCE;
   9797 	}
   9798 
   9799 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9800 		DPRINTF(WM_DEBUG_LINK,
   9801 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9802 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9803 	} else {
   9804 		DPRINTF(WM_DEBUG_LINK,
   9805 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9806 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9807 	}
   9808 
   9809 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9810 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9811 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9812 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9813 	if (sc->sc_type == WM_T_80003) {
   9814 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9815 		case IFM_1000_T:
   9816 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9817 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    9818 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9819 			break;
   9820 		default:
   9821 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9822 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    9823 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   9824 			break;
   9825 		}
   9826 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9827 	}
   9828 }
   9829 
   9830 /* kumeran related (80003, ICH* and PCH*) */
   9831 
   9832 /*
   9833  * wm_kmrn_readreg:
   9834  *
   9835  *	Read a kumeran register
   9836  */
   9837 static int
   9838 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9839 {
   9840 	int rv;
   9841 
   9842 	if (sc->sc_type == WM_T_80003)
   9843 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9844 	else
   9845 		rv = sc->phy.acquire(sc);
   9846 	if (rv != 0) {
   9847 		aprint_error_dev(sc->sc_dev,
   9848 		    "%s: failed to get semaphore\n", __func__);
   9849 		return 0;
   9850 	}
   9851 
   9852 	rv = wm_kmrn_readreg_locked(sc, reg);
   9853 
   9854 	if (sc->sc_type == WM_T_80003)
   9855 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9856 	else
   9857 		sc->phy.release(sc);
   9858 
   9859 	return rv;
   9860 }
   9861 
   9862 static int
   9863 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9864 {
   9865 	int rv;
   9866 
   9867 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9868 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9869 	    KUMCTRLSTA_REN);
   9870 	CSR_WRITE_FLUSH(sc);
   9871 	delay(2);
   9872 
   9873 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9874 
   9875 	return rv;
   9876 }
   9877 
   9878 /*
   9879  * wm_kmrn_writereg:
   9880  *
   9881  *	Write a kumeran register
   9882  */
   9883 static void
   9884 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9885 {
   9886 	int rv;
   9887 
   9888 	if (sc->sc_type == WM_T_80003)
   9889 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9890 	else
   9891 		rv = sc->phy.acquire(sc);
   9892 	if (rv != 0) {
   9893 		aprint_error_dev(sc->sc_dev,
   9894 		    "%s: failed to get semaphore\n", __func__);
   9895 		return;
   9896 	}
   9897 
   9898 	wm_kmrn_writereg_locked(sc, reg, val);
   9899 
   9900 	if (sc->sc_type == WM_T_80003)
   9901 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9902 	else
   9903 		sc->phy.release(sc);
   9904 }
   9905 
   9906 static void
   9907 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9908 {
   9909 
   9910 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9911 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9912 	    (val & KUMCTRLSTA_MASK));
   9913 }
   9914 
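/*
 * Both kumeran accessors above funnel through the single KUMCTRLSTA CSR:
 * the register offset goes into the KUMCTRLSTA_OFFSET field, KUMCTRLSTA_REN
 * marks the access as a read, and the 16 data bits travel in the
 * KUMCTRLSTA_MASK portion of the register.
 */
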
   9915 /* SGMII related */
   9916 
   9917 /*
   9918  * wm_sgmii_uses_mdio
   9919  *
   9920  * Check whether the transaction is to the internal PHY or the external
   9921  * MDIO interface. Return true if it's MDIO.
   9922  */
   9923 static bool
   9924 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9925 {
   9926 	uint32_t reg;
   9927 	bool ismdio = false;
   9928 
   9929 	switch (sc->sc_type) {
   9930 	case WM_T_82575:
   9931 	case WM_T_82576:
   9932 		reg = CSR_READ(sc, WMREG_MDIC);
   9933 		ismdio = ((reg & MDIC_DEST) != 0);
   9934 		break;
   9935 	case WM_T_82580:
   9936 	case WM_T_I350:
   9937 	case WM_T_I354:
   9938 	case WM_T_I210:
   9939 	case WM_T_I211:
   9940 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9941 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9942 		break;
   9943 	default:
   9944 		break;
   9945 	}
   9946 
   9947 	return ismdio;
   9948 }
   9949 
   9950 /*
   9951  * wm_sgmii_readreg:	[mii interface function]
   9952  *
    9953  *	Read a PHY register on the SGMII.
    9954  * This could be handled by the PHY layer if we didn't have to lock the
    9955  * resource ...
   9956  */
   9957 static int
   9958 wm_sgmii_readreg(device_t self, int phy, int reg)
   9959 {
   9960 	struct wm_softc *sc = device_private(self);
   9961 	uint32_t i2ccmd;
   9962 	int i, rv;
   9963 
   9964 	if (sc->phy.acquire(sc)) {
   9965 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9966 		    __func__);
   9967 		return 0;
   9968 	}
   9969 
   9970 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9971 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9972 	    | I2CCMD_OPCODE_READ;
   9973 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9974 
   9975 	/* Poll the ready bit */
   9976 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9977 		delay(50);
   9978 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9979 		if (i2ccmd & I2CCMD_READY)
   9980 			break;
   9981 	}
   9982 	if ((i2ccmd & I2CCMD_READY) == 0)
   9983 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9984 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9985 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9986 
   9987 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9988 
   9989 	sc->phy.release(sc);
   9990 	return rv;
   9991 }
   9992 
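/*
 * Minimal sketch of the byte swap used by the SGMII accessors (kept under
 * #if 0, never compiled; the helper name is hypothetical): the I2CCMD data
 * field is big-endian, so 16-bit values are swapped on both the read and
 * the write path.
 */
#if 0
static inline uint16_t
wm_i2ccmd_bswap16(uint16_t v)
{

	return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00);
}
#endif
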
   9993 /*
   9994  * wm_sgmii_writereg:	[mii interface function]
   9995  *
   9996  *	Write a PHY register on the SGMII.
   9997  * This could be handled by the PHY layer if we didn't have to lock the
    9998  * resource ...
   9999  */
   10000 static void
   10001 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10002 {
   10003 	struct wm_softc *sc = device_private(self);
   10004 	uint32_t i2ccmd;
   10005 	int i;
   10006 	int val_swapped;
   10007 
   10008 	if (sc->phy.acquire(sc) != 0) {
   10009 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10010 		    __func__);
   10011 		return;
   10012 	}
   10013 	/* Swap the data bytes for the I2C interface */
   10014 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10015 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10016 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10017 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10018 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10019 
   10020 	/* Poll the ready bit */
   10021 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10022 		delay(50);
   10023 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10024 		if (i2ccmd & I2CCMD_READY)
   10025 			break;
   10026 	}
   10027 	if ((i2ccmd & I2CCMD_READY) == 0)
   10028 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10029 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10030 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10031 
   10032 	sc->phy.release(sc);
   10033 }
   10034 
   10035 /* TBI related */
   10036 
   10037 /*
   10038  * wm_tbi_mediainit:
   10039  *
   10040  *	Initialize media for use on 1000BASE-X devices.
   10041  */
   10042 static void
   10043 wm_tbi_mediainit(struct wm_softc *sc)
   10044 {
   10045 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10046 	const char *sep = "";
   10047 
   10048 	if (sc->sc_type < WM_T_82543)
   10049 		sc->sc_tipg = TIPG_WM_DFLT;
   10050 	else
   10051 		sc->sc_tipg = TIPG_LG_DFLT;
   10052 
   10053 	sc->sc_tbi_serdes_anegticks = 5;
   10054 
   10055 	/* Initialize our media structures */
   10056 	sc->sc_mii.mii_ifp = ifp;
   10057 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10058 
   10059 	if ((sc->sc_type >= WM_T_82575)
   10060 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10061 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10062 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10063 	else
   10064 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10065 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10066 
   10067 	/*
   10068 	 * SWD Pins:
   10069 	 *
   10070 	 *	0 = Link LED (output)
   10071 	 *	1 = Loss Of Signal (input)
   10072 	 */
   10073 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10074 
   10075 	/* XXX Perhaps this is only for TBI */
   10076 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10077 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10078 
   10079 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10080 		sc->sc_ctrl &= ~CTRL_LRST;
   10081 
   10082 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10083 
   10084 #define	ADD(ss, mm, dd)							\
   10085 do {									\
   10086 	aprint_normal("%s%s", sep, ss);					\
   10087 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10088 	sep = ", ";							\
   10089 } while (/*CONSTCOND*/0)
   10090 
   10091 	aprint_normal_dev(sc->sc_dev, "");
   10092 
   10093 	if (sc->sc_type == WM_T_I354) {
   10094 		uint32_t status;
   10095 
   10096 		status = CSR_READ(sc, WMREG_STATUS);
   10097 		if (((status & STATUS_2P5_SKU) != 0)
   10098 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10099 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
   10100 		} else
   10101 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
   10102 	} else if (sc->sc_type == WM_T_82545) {
   10103 		/* Only 82545 is LX (XXX except SFP) */
   10104 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10105 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10106 	} else {
   10107 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10108 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10109 	}
   10110 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10111 	aprint_normal("\n");
   10112 
   10113 #undef ADD
   10114 
   10115 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10116 }
   10117 
   10118 /*
   10119  * wm_tbi_mediachange:	[ifmedia interface function]
   10120  *
   10121  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10122  */
   10123 static int
   10124 wm_tbi_mediachange(struct ifnet *ifp)
   10125 {
   10126 	struct wm_softc *sc = ifp->if_softc;
   10127 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10128 	uint32_t status;
   10129 	int i;
   10130 
   10131 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10132 		/* XXX need some work for >= 82571 and < 82575 */
   10133 		if (sc->sc_type < WM_T_82575)
   10134 			return 0;
   10135 	}
   10136 
   10137 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10138 	    || (sc->sc_type >= WM_T_82575))
   10139 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10140 
   10141 	sc->sc_ctrl &= ~CTRL_LRST;
   10142 	sc->sc_txcw = TXCW_ANE;
   10143 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10144 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10145 	else if (ife->ifm_media & IFM_FDX)
   10146 		sc->sc_txcw |= TXCW_FD;
   10147 	else
   10148 		sc->sc_txcw |= TXCW_HD;
   10149 
   10150 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10151 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10152 
   10153 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10154 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10155 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10156 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10157 	CSR_WRITE_FLUSH(sc);
   10158 	delay(1000);
   10159 
   10160 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10161 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10162 
   10163 	/*
    10164 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    10165 	 * if the optics detect a signal, and clear if they don't.
   10166 	 */
   10167 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10168 		/* Have signal; wait for the link to come up. */
   10169 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10170 			delay(10000);
   10171 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10172 				break;
   10173 		}
   10174 
   10175 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10176 			    device_xname(sc->sc_dev),i));
   10177 
   10178 		status = CSR_READ(sc, WMREG_STATUS);
   10179 		DPRINTF(WM_DEBUG_LINK,
   10180 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10181 			device_xname(sc->sc_dev),status, STATUS_LU));
   10182 		if (status & STATUS_LU) {
   10183 			/* Link is up. */
   10184 			DPRINTF(WM_DEBUG_LINK,
   10185 			    ("%s: LINK: set media -> link up %s\n",
   10186 			    device_xname(sc->sc_dev),
   10187 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10188 
   10189 			/*
    10190 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    10191 			 * automatically, so we should update sc->sc_ctrl
   10192 			 */
   10193 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10194 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10195 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10196 			if (status & STATUS_FD)
   10197 				sc->sc_tctl |=
   10198 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10199 			else
   10200 				sc->sc_tctl |=
   10201 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10202 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10203 				sc->sc_fcrtl |= FCRTL_XONE;
   10204 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10205 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10206 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10207 				      sc->sc_fcrtl);
   10208 			sc->sc_tbi_linkup = 1;
   10209 		} else {
   10210 			if (i == WM_LINKUP_TIMEOUT)
   10211 				wm_check_for_link(sc);
   10212 			/* Link is down. */
   10213 			DPRINTF(WM_DEBUG_LINK,
   10214 			    ("%s: LINK: set media -> link down\n",
   10215 			    device_xname(sc->sc_dev)));
   10216 			sc->sc_tbi_linkup = 0;
   10217 		}
   10218 	} else {
   10219 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10220 		    device_xname(sc->sc_dev)));
   10221 		sc->sc_tbi_linkup = 0;
   10222 	}
   10223 
   10224 	wm_tbi_serdes_set_linkled(sc);
   10225 
   10226 	return 0;
   10227 }
   10228 
   10229 /*
   10230  * wm_tbi_mediastatus:	[ifmedia interface function]
   10231  *
   10232  *	Get the current interface media status on a 1000BASE-X device.
   10233  */
   10234 static void
   10235 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10236 {
   10237 	struct wm_softc *sc = ifp->if_softc;
   10238 	uint32_t ctrl, status;
   10239 
   10240 	ifmr->ifm_status = IFM_AVALID;
   10241 	ifmr->ifm_active = IFM_ETHER;
   10242 
   10243 	status = CSR_READ(sc, WMREG_STATUS);
   10244 	if ((status & STATUS_LU) == 0) {
   10245 		ifmr->ifm_active |= IFM_NONE;
   10246 		return;
   10247 	}
   10248 
   10249 	ifmr->ifm_status |= IFM_ACTIVE;
   10250 	/* Only 82545 is LX */
   10251 	if (sc->sc_type == WM_T_82545)
   10252 		ifmr->ifm_active |= IFM_1000_LX;
   10253 	else
   10254 		ifmr->ifm_active |= IFM_1000_SX;
   10255 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10256 		ifmr->ifm_active |= IFM_FDX;
   10257 	else
   10258 		ifmr->ifm_active |= IFM_HDX;
   10259 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10260 	if (ctrl & CTRL_RFCE)
   10261 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10262 	if (ctrl & CTRL_TFCE)
   10263 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10264 }
   10265 
   10266 /* XXX TBI only */
   10267 static int
   10268 wm_check_for_link(struct wm_softc *sc)
   10269 {
   10270 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10271 	uint32_t rxcw;
   10272 	uint32_t ctrl;
   10273 	uint32_t status;
   10274 	uint32_t sig;
   10275 
   10276 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10277 		/* XXX need some work for >= 82571 */
   10278 		if (sc->sc_type >= WM_T_82571) {
   10279 			sc->sc_tbi_linkup = 1;
   10280 			return 0;
   10281 		}
   10282 	}
   10283 
   10284 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10285 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10286 	status = CSR_READ(sc, WMREG_STATUS);
   10287 
   10288 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10289 
   10290 	DPRINTF(WM_DEBUG_LINK,
   10291 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10292 		device_xname(sc->sc_dev), __func__,
   10293 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10294 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10295 
   10296 	/*
   10297 	 * SWDPIN   LU RXCW
   10298 	 *      0    0    0
   10299 	 *      0    0    1	(should not happen)
   10300 	 *      0    1    0	(should not happen)
   10301 	 *      0    1    1	(should not happen)
    10302 	 *      1    0    0	Disable autonegotiation and force link up
    10303 	 *      1    0    1	got /C/ but no link yet
    10304 	 *      1    1    0	(link up)
    10305 	 *      1    1    1	If IFM_AUTO, restart autonegotiation
   10306 	 *
   10307 	 */
   10308 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10309 	    && ((status & STATUS_LU) == 0)
   10310 	    && ((rxcw & RXCW_C) == 0)) {
   10311 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10312 			__func__));
   10313 		sc->sc_tbi_linkup = 0;
   10314 		/* Disable auto-negotiation in the TXCW register */
   10315 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10316 
   10317 		/*
   10318 		 * Force link-up and also force full-duplex.
   10319 		 *
    10320 		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
    10321 		 * automatically, so we should update sc->sc_ctrl
   10322 		 */
   10323 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10324 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10325 	} else if (((status & STATUS_LU) != 0)
   10326 	    && ((rxcw & RXCW_C) != 0)
   10327 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10328 		sc->sc_tbi_linkup = 1;
   10329 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10330 			__func__));
   10331 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10332 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10333 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10334 	    && ((rxcw & RXCW_C) != 0)) {
   10335 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10336 	} else {
   10337 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10338 			status));
   10339 	}
   10340 
   10341 	return 0;
   10342 }
   10343 
   10344 /*
   10345  * wm_tbi_tick:
   10346  *
   10347  *	Check the link on TBI devices.
   10348  *	This function acts as mii_tick().
   10349  */
   10350 static void
   10351 wm_tbi_tick(struct wm_softc *sc)
   10352 {
   10353 	struct mii_data *mii = &sc->sc_mii;
   10354 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10355 	uint32_t status;
   10356 
   10357 	KASSERT(WM_CORE_LOCKED(sc));
   10358 
   10359 	status = CSR_READ(sc, WMREG_STATUS);
   10360 
   10361 	/* XXX is this needed? */
   10362 	(void)CSR_READ(sc, WMREG_RXCW);
   10363 	(void)CSR_READ(sc, WMREG_CTRL);
   10364 
   10365 	/* set link status */
   10366 	if ((status & STATUS_LU) == 0) {
   10367 		DPRINTF(WM_DEBUG_LINK,
   10368 		    ("%s: LINK: checklink -> down\n",
   10369 			device_xname(sc->sc_dev)));
   10370 		sc->sc_tbi_linkup = 0;
   10371 	} else if (sc->sc_tbi_linkup == 0) {
   10372 		DPRINTF(WM_DEBUG_LINK,
   10373 		    ("%s: LINK: checklink -> up %s\n",
   10374 			device_xname(sc->sc_dev),
   10375 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10376 		sc->sc_tbi_linkup = 1;
   10377 		sc->sc_tbi_serdes_ticks = 0;
   10378 	}
   10379 
   10380 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10381 		goto setled;
   10382 
   10383 	if ((status & STATUS_LU) == 0) {
   10384 		sc->sc_tbi_linkup = 0;
   10385 		/* If the timer expired, retry autonegotiation */
   10386 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10387 		    && (++sc->sc_tbi_serdes_ticks
   10388 			>= sc->sc_tbi_serdes_anegticks)) {
   10389 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10390 			sc->sc_tbi_serdes_ticks = 0;
   10391 			/*
   10392 			 * Reset the link, and let autonegotiation do
   10393 			 * its thing
   10394 			 */
   10395 			sc->sc_ctrl |= CTRL_LRST;
   10396 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10397 			CSR_WRITE_FLUSH(sc);
   10398 			delay(1000);
   10399 			sc->sc_ctrl &= ~CTRL_LRST;
   10400 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10401 			CSR_WRITE_FLUSH(sc);
   10402 			delay(1000);
   10403 			CSR_WRITE(sc, WMREG_TXCW,
   10404 			    sc->sc_txcw & ~TXCW_ANE);
   10405 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10406 		}
   10407 	}
   10408 
   10409 setled:
   10410 	wm_tbi_serdes_set_linkled(sc);
   10411 }
   10412 
   10413 /* SERDES related */
   10414 static void
   10415 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10416 {
   10417 	uint32_t reg;
   10418 
   10419 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10420 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10421 		return;
   10422 
   10423 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10424 	reg |= PCS_CFG_PCS_EN;
   10425 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10426 
   10427 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10428 	reg &= ~CTRL_EXT_SWDPIN(3);
   10429 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10430 	CSR_WRITE_FLUSH(sc);
   10431 }
   10432 
   10433 static int
   10434 wm_serdes_mediachange(struct ifnet *ifp)
   10435 {
   10436 	struct wm_softc *sc = ifp->if_softc;
   10437 	bool pcs_autoneg = true; /* XXX */
   10438 	uint32_t ctrl_ext, pcs_lctl, reg;
   10439 
   10440 	/* XXX Currently, this function is not called on 8257[12] */
   10441 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10442 	    || (sc->sc_type >= WM_T_82575))
   10443 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10444 
   10445 	wm_serdes_power_up_link_82575(sc);
   10446 
   10447 	sc->sc_ctrl |= CTRL_SLU;
   10448 
   10449 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10450 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10451 
   10452 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10453 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10454 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10455 	case CTRL_EXT_LINK_MODE_SGMII:
   10456 		pcs_autoneg = true;
   10457 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10458 		break;
   10459 	case CTRL_EXT_LINK_MODE_1000KX:
   10460 		pcs_autoneg = false;
   10461 		/* FALLTHROUGH */
   10462 	default:
   10463 		if ((sc->sc_type == WM_T_82575)
   10464 		    || (sc->sc_type == WM_T_82576)) {
   10465 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10466 				pcs_autoneg = false;
   10467 		}
   10468 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10469 		    | CTRL_FRCFDX;
   10470 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10471 	}
   10472 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10473 
   10474 	if (pcs_autoneg) {
   10475 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10476 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10477 
   10478 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10479 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10480 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10481 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10482 	} else
   10483 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10484 
   10485 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    10486 
   10488 	return 0;
   10489 }
   10490 
   10491 static void
   10492 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10493 {
   10494 	struct wm_softc *sc = ifp->if_softc;
   10495 	struct mii_data *mii = &sc->sc_mii;
   10496 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10497 	uint32_t pcs_adv, pcs_lpab, reg;
   10498 
   10499 	ifmr->ifm_status = IFM_AVALID;
   10500 	ifmr->ifm_active = IFM_ETHER;
   10501 
   10502 	/* Check PCS */
   10503 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10504 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10505 		ifmr->ifm_active |= IFM_NONE;
   10506 		sc->sc_tbi_linkup = 0;
   10507 		goto setled;
   10508 	}
   10509 
   10510 	sc->sc_tbi_linkup = 1;
   10511 	ifmr->ifm_status |= IFM_ACTIVE;
   10512 	if (sc->sc_type == WM_T_I354) {
   10513 		uint32_t status;
   10514 
   10515 		status = CSR_READ(sc, WMREG_STATUS);
   10516 		if (((status & STATUS_2P5_SKU) != 0)
   10517 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10518 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10519 		} else
   10520 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10521 	} else {
   10522 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10523 		case PCS_LSTS_SPEED_10:
   10524 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10525 			break;
   10526 		case PCS_LSTS_SPEED_100:
   10527 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10528 			break;
   10529 		case PCS_LSTS_SPEED_1000:
   10530 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10531 			break;
   10532 		default:
   10533 			device_printf(sc->sc_dev, "Unknown speed\n");
   10534 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10535 			break;
   10536 		}
   10537 	}
   10538 	if ((reg & PCS_LSTS_FDX) != 0)
   10539 		ifmr->ifm_active |= IFM_FDX;
   10540 	else
   10541 		ifmr->ifm_active |= IFM_HDX;
   10542 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10543 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10544 		/* Check flow */
   10545 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10546 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10547 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10548 			goto setled;
   10549 		}
   10550 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10551 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10552 		DPRINTF(WM_DEBUG_LINK,
   10553 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
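		/*
		 * Resolve flow control from the advertised and link partner
		 * ability words below, following the usual 802.3 Annex 28B
		 * rules (an assumption based on other e1000-derived
		 * drivers): symmetric pause when both sides advertise
		 * SYM_PAUSE, otherwise asymmetric pause toward the side
		 * whose ASYM_PAUSE advertisement matches the partner's
		 * abilities.
		 */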
   10554 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10555 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10556 			mii->mii_media_active |= IFM_FLOW
   10557 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10558 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10559 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10560 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10561 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10562 			mii->mii_media_active |= IFM_FLOW
   10563 			    | IFM_ETH_TXPAUSE;
   10564 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10565 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10566 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10567 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10568 			mii->mii_media_active |= IFM_FLOW
   10569 			    | IFM_ETH_RXPAUSE;
   10570 		}
   10571 	}
   10572 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10573 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10574 setled:
   10575 	wm_tbi_serdes_set_linkled(sc);
   10576 }
   10577 
   10578 /*
   10579  * wm_serdes_tick:
   10580  *
   10581  *	Check the link on serdes devices.
   10582  */
   10583 static void
   10584 wm_serdes_tick(struct wm_softc *sc)
   10585 {
   10586 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10587 	struct mii_data *mii = &sc->sc_mii;
   10588 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10589 	uint32_t reg;
   10590 
   10591 	KASSERT(WM_CORE_LOCKED(sc));
   10592 
   10593 	mii->mii_media_status = IFM_AVALID;
   10594 	mii->mii_media_active = IFM_ETHER;
   10595 
   10596 	/* Check PCS */
   10597 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10598 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10599 		mii->mii_media_status |= IFM_ACTIVE;
   10600 		sc->sc_tbi_linkup = 1;
   10601 		sc->sc_tbi_serdes_ticks = 0;
   10602 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10603 		if ((reg & PCS_LSTS_FDX) != 0)
   10604 			mii->mii_media_active |= IFM_FDX;
   10605 		else
   10606 			mii->mii_media_active |= IFM_HDX;
   10607 	} else {
   10608 		mii->mii_media_status |= IFM_NONE;
   10609 		sc->sc_tbi_linkup = 0;
   10610 		/* If the timer expired, retry autonegotiation */
   10611 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10612 		    && (++sc->sc_tbi_serdes_ticks
   10613 			>= sc->sc_tbi_serdes_anegticks)) {
   10614 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10615 			sc->sc_tbi_serdes_ticks = 0;
   10616 			/* XXX */
   10617 			wm_serdes_mediachange(ifp);
   10618 		}
   10619 	}
   10620 
   10621 	wm_tbi_serdes_set_linkled(sc);
   10622 }
   10623 
   10624 /* SFP related */
   10625 
   10626 static int
   10627 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10628 {
   10629 	uint32_t i2ccmd;
   10630 	int i;
   10631 
   10632 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10633 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10634 
   10635 	/* Poll the ready bit */
   10636 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10637 		delay(50);
   10638 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10639 		if (i2ccmd & I2CCMD_READY)
   10640 			break;
   10641 	}
   10642 	if ((i2ccmd & I2CCMD_READY) == 0)
   10643 		return -1;
   10644 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10645 		return -1;
   10646 
   10647 	*data = i2ccmd & 0x00ff;
   10648 
   10649 	return 0;
   10650 }
   10651 
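/*
 * wm_sfp_get_media_type() below retries this read a few times because the
 * module's EEPROM may not answer immediately after the I2C interface is
 * enabled in CTRL_EXT.
 */
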
   10652 static uint32_t
   10653 wm_sfp_get_media_type(struct wm_softc *sc)
   10654 {
   10655 	uint32_t ctrl_ext;
   10656 	uint8_t val = 0;
   10657 	int timeout = 3;
   10658 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10659 	int rv = -1;
   10660 
   10661 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10662 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10663 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10664 	CSR_WRITE_FLUSH(sc);
   10665 
   10666 	/* Read SFP module data */
   10667 	while (timeout) {
   10668 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10669 		if (rv == 0)
   10670 			break;
   10671 		delay(100*1000); /* XXX too big */
   10672 		timeout--;
   10673 	}
   10674 	if (rv != 0)
   10675 		goto out;
   10676 	switch (val) {
   10677 	case SFF_SFP_ID_SFF:
   10678 		aprint_normal_dev(sc->sc_dev,
   10679 		    "Module/Connector soldered to board\n");
   10680 		break;
   10681 	case SFF_SFP_ID_SFP:
   10682 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10683 		break;
   10684 	case SFF_SFP_ID_UNKNOWN:
   10685 		goto out;
   10686 	default:
   10687 		break;
   10688 	}
   10689 
   10690 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10691 	if (rv != 0) {
   10692 		goto out;
   10693 	}
   10694 
   10695 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10696 		mediatype = WM_MEDIATYPE_SERDES;
   10697 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   10698 		sc->sc_flags |= WM_F_SGMII;
   10699 		mediatype = WM_MEDIATYPE_COPPER;
   10700 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   10701 		sc->sc_flags |= WM_F_SGMII;
   10702 		mediatype = WM_MEDIATYPE_SERDES;
   10703 	}
   10704 
   10705 out:
   10706 	/* Restore I2C interface setting */
   10707 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10708 
   10709 	return mediatype;
   10710 }
   10711 
   10712 /*
   10713  * NVM related.
   10714  * Microwire, SPI (w/wo EERD) and Flash.
   10715  */
   10716 
   10717 /* Both spi and uwire */
   10718 
   10719 /*
   10720  * wm_eeprom_sendbits:
   10721  *
   10722  *	Send a series of bits to the EEPROM.
   10723  */
   10724 static void
   10725 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10726 {
   10727 	uint32_t reg;
   10728 	int x;
   10729 
   10730 	reg = CSR_READ(sc, WMREG_EECD);
   10731 
   10732 	for (x = nbits; x > 0; x--) {
   10733 		if (bits & (1U << (x - 1)))
   10734 			reg |= EECD_DI;
   10735 		else
   10736 			reg &= ~EECD_DI;
   10737 		CSR_WRITE(sc, WMREG_EECD, reg);
   10738 		CSR_WRITE_FLUSH(sc);
   10739 		delay(2);
   10740 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10741 		CSR_WRITE_FLUSH(sc);
   10742 		delay(2);
   10743 		CSR_WRITE(sc, WMREG_EECD, reg);
   10744 		CSR_WRITE_FLUSH(sc);
   10745 		delay(2);
   10746 	}
   10747 }
   10748 
   10749 /*
   10750  * wm_eeprom_recvbits:
   10751  *
   10752  *	Receive a series of bits from the EEPROM.
   10753  */
   10754 static void
   10755 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10756 {
   10757 	uint32_t reg, val;
   10758 	int x;
   10759 
   10760 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10761 
   10762 	val = 0;
   10763 	for (x = nbits; x > 0; x--) {
   10764 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10765 		CSR_WRITE_FLUSH(sc);
   10766 		delay(2);
   10767 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10768 			val |= (1U << (x - 1));
   10769 		CSR_WRITE(sc, WMREG_EECD, reg);
   10770 		CSR_WRITE_FLUSH(sc);
   10771 		delay(2);
   10772 	}
   10773 	*valp = val;
   10774 }
   10775 
   10776 /* Microwire */
   10777 
   10778 /*
   10779  * wm_nvm_read_uwire:
   10780  *
   10781  *	Read a word from the EEPROM using the MicroWire protocol.
   10782  */
   10783 static int
   10784 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10785 {
   10786 	uint32_t reg, val;
   10787 	int i;
   10788 
   10789 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10790 		device_xname(sc->sc_dev), __func__));
   10791 
   10792 	for (i = 0; i < wordcnt; i++) {
   10793 		/* Clear SK and DI. */
   10794 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10795 		CSR_WRITE(sc, WMREG_EECD, reg);
   10796 
   10797 		/*
   10798 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10799 		 * and Xen.
   10800 		 *
    10801 		 * We use this workaround only for the 82540 because qemu's
    10802 		 * e1000 acts as an 82540.
   10803 		 */
   10804 		if (sc->sc_type == WM_T_82540) {
   10805 			reg |= EECD_SK;
   10806 			CSR_WRITE(sc, WMREG_EECD, reg);
   10807 			reg &= ~EECD_SK;
   10808 			CSR_WRITE(sc, WMREG_EECD, reg);
   10809 			CSR_WRITE_FLUSH(sc);
   10810 			delay(2);
   10811 		}
   10812 		/* XXX: end of workaround */
   10813 
   10814 		/* Set CHIP SELECT. */
   10815 		reg |= EECD_CS;
   10816 		CSR_WRITE(sc, WMREG_EECD, reg);
   10817 		CSR_WRITE_FLUSH(sc);
   10818 		delay(2);
   10819 
   10820 		/* Shift in the READ command. */
   10821 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10822 
   10823 		/* Shift in address. */
   10824 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10825 
   10826 		/* Shift out the data. */
   10827 		wm_eeprom_recvbits(sc, &val, 16);
   10828 		data[i] = val & 0xffff;
   10829 
   10830 		/* Clear CHIP SELECT. */
   10831 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10832 		CSR_WRITE(sc, WMREG_EECD, reg);
   10833 		CSR_WRITE_FLUSH(sc);
   10834 		delay(2);
   10835 	}
   10836 
   10837 	return 0;
   10838 }
   10839 
   10840 /* SPI */
   10841 
   10842 /*
   10843  * Set SPI and FLASH related information from the EECD register.
   10844  * For 82541 and 82547, the word size is taken from EEPROM.
   10845  */
   10846 static int
   10847 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10848 {
   10849 	int size;
   10850 	uint32_t reg;
   10851 	uint16_t data;
   10852 
   10853 	reg = CSR_READ(sc, WMREG_EECD);
   10854 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10855 
   10856 	/* Read the size of NVM from EECD by default */
   10857 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10858 	switch (sc->sc_type) {
   10859 	case WM_T_82541:
   10860 	case WM_T_82541_2:
   10861 	case WM_T_82547:
   10862 	case WM_T_82547_2:
   10863 		/* Set dummy value to access EEPROM */
   10864 		sc->sc_nvm_wordsize = 64;
   10865 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10866 		reg = data;
   10867 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10868 		if (size == 0)
   10869 			size = 6; /* 64 word size */
   10870 		else
   10871 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10872 		break;
   10873 	case WM_T_80003:
   10874 	case WM_T_82571:
   10875 	case WM_T_82572:
   10876 	case WM_T_82573: /* SPI case */
   10877 	case WM_T_82574: /* SPI case */
   10878 	case WM_T_82583: /* SPI case */
   10879 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10880 		if (size > 14)
   10881 			size = 14;
   10882 		break;
   10883 	case WM_T_82575:
   10884 	case WM_T_82576:
   10885 	case WM_T_82580:
   10886 	case WM_T_I350:
   10887 	case WM_T_I354:
   10888 	case WM_T_I210:
   10889 	case WM_T_I211:
   10890 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10891 		if (size > 15)
   10892 			size = 15;
   10893 		break;
   10894 	default:
   10895 		aprint_error_dev(sc->sc_dev,
   10896 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10897 		return -1;
   10898 		break;
   10899 	}
   10900 
   10901 	sc->sc_nvm_wordsize = 1 << size;
   10902 
   10903 	return 0;
   10904 }
   10905 
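/*
 * Worked example for the size computation above (assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6, as in other e1000-derived drivers): an
 * EECD size field of 1 on an 82575 gives size = 1 + 6 = 7, so
 * sc_nvm_wordsize becomes 1 << 7 = 128 words; the 82541/82547 special case
 * with a size field of 0 falls back to 1 << 6 = 64 words.
 */
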
   10906 /*
   10907  * wm_nvm_ready_spi:
   10908  *
   10909  *	Wait for a SPI EEPROM to be ready for commands.
   10910  */
   10911 static int
   10912 wm_nvm_ready_spi(struct wm_softc *sc)
   10913 {
   10914 	uint32_t val;
   10915 	int usec;
   10916 
   10917 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10918 		device_xname(sc->sc_dev), __func__));
   10919 
   10920 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10921 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10922 		wm_eeprom_recvbits(sc, &val, 8);
   10923 		if ((val & SPI_SR_RDY) == 0)
   10924 			break;
   10925 	}
   10926 	if (usec >= SPI_MAX_RETRIES) {
    10927 		aprint_error_dev(sc->sc_dev,
    10928 		    "EEPROM failed to become ready\n");
   10928 		return 1;
   10929 	}
   10930 	return 0;
   10931 }
   10932 
   10933 /*
   10934  * wm_nvm_read_spi:
   10935  *
    10936  *	Read a word from the EEPROM using the SPI protocol.
   10937  */
   10938 static int
   10939 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10940 {
   10941 	uint32_t reg, val;
   10942 	int i;
   10943 	uint8_t opc;
   10944 
   10945 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10946 		device_xname(sc->sc_dev), __func__));
   10947 
   10948 	/* Clear SK and CS. */
   10949 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10950 	CSR_WRITE(sc, WMREG_EECD, reg);
   10951 	CSR_WRITE_FLUSH(sc);
   10952 	delay(2);
   10953 
   10954 	if (wm_nvm_ready_spi(sc))
   10955 		return 1;
   10956 
   10957 	/* Toggle CS to flush commands. */
   10958 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10959 	CSR_WRITE_FLUSH(sc);
   10960 	delay(2);
   10961 	CSR_WRITE(sc, WMREG_EECD, reg);
   10962 	CSR_WRITE_FLUSH(sc);
   10963 	delay(2);
   10964 
   10965 	opc = SPI_OPC_READ;
   10966 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10967 		opc |= SPI_OPC_A8;
   10968 
   10969 	wm_eeprom_sendbits(sc, opc, 8);
   10970 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10971 
   10972 	for (i = 0; i < wordcnt; i++) {
   10973 		wm_eeprom_recvbits(sc, &val, 16);
   10974 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10975 	}
   10976 
   10977 	/* Raise CS and clear SK. */
   10978 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10979 	CSR_WRITE(sc, WMREG_EECD, reg);
   10980 	CSR_WRITE_FLUSH(sc);
   10981 	delay(2);
   10982 
   10983 	return 0;
   10984 }
   10985 
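/*
 * Note on "word << 1" above: SPI EEPROMs are byte addressed, so the word
 * index is doubled into a byte address before being clocked out, and
 * SPI_OPC_A8 carries address bit 8 for parts with 8-bit addressing.
 */
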
    10986 /* Reading with EERD */
   10987 
   10988 static int
   10989 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10990 {
   10991 	uint32_t attempts = 100000;
   10992 	uint32_t i, reg = 0;
   10993 	int32_t done = -1;
   10994 
   10995 	for (i = 0; i < attempts; i++) {
   10996 		reg = CSR_READ(sc, rw);
   10997 
   10998 		if (reg & EERD_DONE) {
   10999 			done = 0;
   11000 			break;
   11001 		}
   11002 		delay(5);
   11003 	}
   11004 
   11005 	return done;
   11006 }
   11007 
   11008 static int
   11009 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11010     uint16_t *data)
   11011 {
   11012 	int i, eerd = 0;
   11013 	int error = 0;
   11014 
   11015 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11016 		device_xname(sc->sc_dev), __func__));
   11017 
   11018 	for (i = 0; i < wordcnt; i++) {
   11019 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11020 
   11021 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11022 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11023 		if (error != 0)
   11024 			break;
   11025 
   11026 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11027 	}
   11028 
   11029 	return error;
   11030 }
   11031 
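/*
 * The EERD handshake above is self-contained per word: the word address is
 * placed at EERD_ADDR_SHIFT together with EERD_START, wm_poll_eerd_eewr_done()
 * spins on EERD_DONE, and the 16-bit result is taken from the bits above
 * EERD_DATA_SHIFT.
 */
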
   11032 /* Flash */
   11033 
   11034 static int
   11035 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11036 {
   11037 	uint32_t eecd;
   11038 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11039 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11040 	uint8_t sig_byte = 0;
   11041 
   11042 	switch (sc->sc_type) {
   11043 	case WM_T_PCH_SPT:
   11044 		/*
   11045 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11046 		 * sector valid bits from the NVM.
   11047 		 */
   11048 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11049 		if ((*bank == 0) || (*bank == 1)) {
   11050 			aprint_error_dev(sc->sc_dev,
   11051 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11052 				*bank);
   11053 			return -1;
   11054 		} else {
   11055 			*bank = *bank - 2;
   11056 			return 0;
   11057 		}
   11058 	case WM_T_ICH8:
   11059 	case WM_T_ICH9:
   11060 		eecd = CSR_READ(sc, WMREG_EECD);
   11061 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11062 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11063 			return 0;
   11064 		}
   11065 		/* FALLTHROUGH */
   11066 	default:
   11067 		/* Default to 0 */
   11068 		*bank = 0;
   11069 
   11070 		/* Check bank 0 */
   11071 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11072 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11073 			*bank = 0;
   11074 			return 0;
   11075 		}
   11076 
   11077 		/* Check bank 1 */
   11078 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11079 		    &sig_byte);
   11080 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11081 			*bank = 1;
   11082 			return 0;
   11083 		}
   11084 	}
   11085 
   11086 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11087 		device_xname(sc->sc_dev)));
   11088 	return -1;
   11089 }
   11090 
   11091 /******************************************************************************
   11092  * This function does initial flash setup so that a new read/write/erase cycle
   11093  * can be started.
   11094  *
   11095  * sc - The pointer to the hw structure
   11096  ****************************************************************************/
   11097 static int32_t
   11098 wm_ich8_cycle_init(struct wm_softc *sc)
   11099 {
   11100 	uint16_t hsfsts;
   11101 	int32_t error = 1;
   11102 	int32_t i     = 0;
   11103 
   11104 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11105 
    11106 	/* Check the Flash Descriptor Valid bit in HW status */
   11107 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11108 		return error;
   11109 	}
   11110 
   11111 	/* Clear FCERR in Hw status by writing 1 */
   11112 	/* Clear DAEL in Hw status by writing a 1 */
   11113 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11114 
   11115 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11116 
   11117 	/*
    11118 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11119 	 * check against in order to start a new cycle, or the FDONE bit
    11120 	 * should be changed in the hardware so that it is 1 after hardware
    11121 	 * reset, which can then be used as an indication whether a cycle is
    11122 	 * in progress or has been completed.  We should also have some
    11123 	 * software semaphore mechanism to guard FDONE or the
    11124 	 * cycle-in-progress bit so that accesses by two threads are
    11125 	 * serialized, and so that two threads can't start a cycle at once.
   11126 	 */
   11127 
   11128 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11129 		/*
   11130 		 * There is no cycle running at present, so we can start a
   11131 		 * cycle
   11132 		 */
   11133 
   11134 		/* Begin by setting Flash Cycle Done. */
   11135 		hsfsts |= HSFSTS_DONE;
   11136 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11137 		error = 0;
   11138 	} else {
   11139 		/*
    11140 		 * Otherwise, poll for some time so the current cycle has a
    11141 		 * chance to end before giving up.
   11142 		 */
   11143 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11144 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11145 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11146 				error = 0;
   11147 				break;
   11148 			}
   11149 			delay(1);
   11150 		}
   11151 		if (error == 0) {
   11152 			/*
    11153 			 * The previous cycle ended within the timeout, so
    11154 			 * now set the Flash Cycle Done bit.
   11155 			 */
   11156 			hsfsts |= HSFSTS_DONE;
   11157 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11158 		}
   11159 	}
   11160 	return error;
   11161 }
   11162 
   11163 /******************************************************************************
   11164  * This function starts a flash cycle and waits for its completion
   11165  *
   11166  * sc - The pointer to the hw structure
   11167  ****************************************************************************/
   11168 static int32_t
   11169 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11170 {
   11171 	uint16_t hsflctl;
   11172 	uint16_t hsfsts;
   11173 	int32_t error = 1;
   11174 	uint32_t i = 0;
   11175 
   11176 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11177 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11178 	hsflctl |= HSFCTL_GO;
   11179 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11180 
   11181 	/* Wait till FDONE bit is set to 1 */
   11182 	do {
   11183 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11184 		if (hsfsts & HSFSTS_DONE)
   11185 			break;
   11186 		delay(1);
   11187 		i++;
   11188 	} while (i < timeout);
    11189 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11190 		error = 0;
   11191 
   11192 	return error;
   11193 }
   11194 
   11195 /******************************************************************************
   11196  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11197  *
   11198  * sc - The pointer to the hw structure
   11199  * index - The index of the byte or word to read.
   11200  * size - Size of data to read, 1=byte 2=word, 4=dword
   11201  * data - Pointer to the word to store the value read.
   11202  *****************************************************************************/
   11203 static int32_t
   11204 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11205     uint32_t size, uint32_t *data)
   11206 {
   11207 	uint16_t hsfsts;
   11208 	uint16_t hsflctl;
   11209 	uint32_t flash_linear_address;
   11210 	uint32_t flash_data = 0;
   11211 	int32_t error = 1;
   11212 	int32_t count = 0;
   11213 
    11214 	if (size < 1 || size > 4 || data == NULL ||
   11215 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11216 		return error;
   11217 
   11218 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11219 	    sc->sc_ich8_flash_base;
   11220 
   11221 	do {
   11222 		delay(1);
   11223 		/* Steps */
   11224 		error = wm_ich8_cycle_init(sc);
   11225 		if (error)
   11226 			break;
   11227 
   11228 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11229 		/* The byte count field holds size - 1 (0 = 1 byte, etc.) */
   11230 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11231 		    & HSFCTL_BCOUNT_MASK;
   11232 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11233 		if (sc->sc_type == WM_T_PCH_SPT) {
   11234 			/*
    11235 			 * In SPT, this register is in LAN memory space, not
    11236 			 * flash.  Therefore, only 32-bit access is supported.
   11237 			 */
   11238 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11239 			    (uint32_t)hsflctl);
   11240 		} else
   11241 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11242 
   11243 		/*
   11244 		 * Write the last 24 bits of index into Flash Linear address
   11245 		 * field in Flash Address
   11246 		 */
    11247 		/* TODO: maybe check the index against the size of the flash */
   11248 
   11249 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11250 
   11251 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11252 
   11253 		/*
    11254 		 * If FCERR is set, clear it and retry the whole sequence a
    11255 		 * few more times; otherwise read in (shift in) the Flash
    11256 		 * Data0 register, whose bytes arrive least significant byte
    11257 		 * first.
   11258 		 */
   11259 		if (error == 0) {
   11260 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11261 			if (size == 1)
   11262 				*data = (uint8_t)(flash_data & 0x000000FF);
   11263 			else if (size == 2)
   11264 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11265 			else if (size == 4)
   11266 				*data = (uint32_t)flash_data;
   11267 			break;
   11268 		} else {
   11269 			/*
   11270 			 * If we've gotten here, then things are probably
   11271 			 * completely hosed, but if the error condition is
   11272 			 * detected, it won't hurt to give it another try...
   11273 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11274 			 */
   11275 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11276 			if (hsfsts & HSFSTS_ERR) {
   11277 				/* Repeat for some time before giving up. */
   11278 				continue;
   11279 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11280 				break;
   11281 		}
   11282 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11283 
   11284 	return error;
   11285 }
   11286 
   11287 /******************************************************************************
   11288  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11289  *
   11290  * sc - pointer to wm_hw structure
   11291  * index - The index of the byte to read.
   11292  * data - Pointer to a byte to store the value read.
   11293  *****************************************************************************/
   11294 static int32_t
   11295 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11296 {
   11297 	int32_t status;
   11298 	uint32_t word = 0;
   11299 
   11300 	status = wm_read_ich8_data(sc, index, 1, &word);
   11301 	if (status == 0)
   11302 		*data = (uint8_t)word;
   11303 	else
   11304 		*data = 0;
   11305 
   11306 	return status;
   11307 }
   11308 
   11309 /******************************************************************************
   11310  * Reads a word from the NVM using the ICH8 flash access registers.
   11311  *
   11312  * sc - pointer to wm_hw structure
   11313  * index - The starting byte index of the word to read.
   11314  * data - Pointer to a word to store the value read.
   11315  *****************************************************************************/
   11316 static int32_t
   11317 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11318 {
   11319 	int32_t status;
   11320 	uint32_t word = 0;
   11321 
   11322 	status = wm_read_ich8_data(sc, index, 2, &word);
   11323 	if (status == 0)
   11324 		*data = (uint16_t)word;
   11325 	else
   11326 		*data = 0;
   11327 
   11328 	return status;
   11329 }
   11330 
   11331 /******************************************************************************
   11332  * Reads a dword from the NVM using the ICH8 flash access registers.
   11333  *
    11334  * sc - pointer to the wm_softc structure
    11335  * index - The starting byte index of the dword to read.
    11336  * data - Pointer to a dword to store the value read.
   11337  *****************************************************************************/
   11338 static int32_t
   11339 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11340 {
   11341 	int32_t status;
   11342 
   11343 	status = wm_read_ich8_data(sc, index, 4, data);
   11344 	return status;
   11345 }
   11346 
   11347 /******************************************************************************
   11348  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11349  * register.
   11350  *
   11351  * sc - Struct containing variables accessed by shared code
   11352  * offset - offset of word in the EEPROM to read
   11353  * data - word read from the EEPROM
   11354  * words - number of words to read
   11355  *****************************************************************************/
   11356 static int
   11357 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11358 {
   11359 	int32_t  error = 0;
   11360 	uint32_t flash_bank = 0;
   11361 	uint32_t act_offset = 0;
   11362 	uint32_t bank_offset = 0;
   11363 	uint16_t word = 0;
   11364 	uint16_t i = 0;
   11365 
   11366 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11367 		device_xname(sc->sc_dev), __func__));
   11368 
   11369 	/*
   11370 	 * We need to know which is the valid flash bank.  In the event
   11371 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11372 	 * managing flash_bank.  So it cannot be trusted and needs
   11373 	 * to be updated with each read.
   11374 	 */
   11375 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11376 	if (error) {
   11377 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11378 			device_xname(sc->sc_dev)));
   11379 		flash_bank = 0;
   11380 	}
   11381 
   11382 	/*
    11383 	 * Adjust the offset if we're on bank 1, accounting for the
    11384 	 * word size.
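          	 * The bank size is kept in words, hence the "* 2" below to
          	 * get a byte offset (e.g. a 0x800-word bank puts bank 1 at
          	 * byte offset 0x1000).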
   11385 	 */
   11386 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11387 
   11388 	error = wm_get_swfwhw_semaphore(sc);
   11389 	if (error) {
   11390 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11391 		    __func__);
   11392 		return error;
   11393 	}
   11394 
   11395 	for (i = 0; i < words; i++) {
   11396 		/* The NVM part needs a byte offset, hence * 2 */
   11397 		act_offset = bank_offset + ((offset + i) * 2);
   11398 		error = wm_read_ich8_word(sc, act_offset, &word);
   11399 		if (error) {
   11400 			aprint_error_dev(sc->sc_dev,
   11401 			    "%s: failed to read NVM\n", __func__);
   11402 			break;
   11403 		}
   11404 		data[i] = word;
   11405 	}
   11406 
   11407 	wm_put_swfwhw_semaphore(sc);
   11408 	return error;
   11409 }
   11410 
   11411 /******************************************************************************
   11412  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11413  * register.
   11414  *
   11415  * sc - Struct containing variables accessed by shared code
   11416  * offset - offset of word in the EEPROM to read
   11417  * data - word read from the EEPROM
   11418  * words - number of words to read
   11419  *****************************************************************************/
   11420 static int
   11421 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11422 {
   11423 	int32_t  error = 0;
   11424 	uint32_t flash_bank = 0;
   11425 	uint32_t act_offset = 0;
   11426 	uint32_t bank_offset = 0;
   11427 	uint32_t dword = 0;
   11428 	uint16_t i = 0;
   11429 
   11430 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11431 		device_xname(sc->sc_dev), __func__));
   11432 
   11433 	/*
   11434 	 * We need to know which is the valid flash bank.  In the event
   11435 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11436 	 * managing flash_bank.  So it cannot be trusted and needs
   11437 	 * to be updated with each read.
   11438 	 */
   11439 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11440 	if (error) {
   11441 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11442 			device_xname(sc->sc_dev)));
   11443 		flash_bank = 0;
   11444 	}
   11445 
   11446 	/*
    11447 	 * Adjust the offset if we're on bank 1, accounting for the
    11448 	 * word size.
   11449 	 */
   11450 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11451 
   11452 	error = wm_get_swfwhw_semaphore(sc);
   11453 	if (error) {
   11454 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11455 		    __func__);
   11456 		return error;
   11457 	}
   11458 
   11459 	for (i = 0; i < words; i++) {
   11460 		/* The NVM part needs a byte offset, hence * 2 */
   11461 		act_offset = bank_offset + ((offset + i) * 2);
   11462 		/* but we must read dword aligned, so mask ... */
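          		/*
          		 * e.g. act_offset 0x12 reads the dword at 0x10 and then
          		 * picks the high word below.
          		 */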
   11463 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11464 		if (error) {
   11465 			aprint_error_dev(sc->sc_dev,
   11466 			    "%s: failed to read NVM\n", __func__);
   11467 			break;
   11468 		}
   11469 		/* ... and pick out low or high word */
   11470 		if ((act_offset & 0x2) == 0)
   11471 			data[i] = (uint16_t)(dword & 0xFFFF);
   11472 		else
   11473 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11474 	}
   11475 
   11476 	wm_put_swfwhw_semaphore(sc);
   11477 	return error;
   11478 }
   11479 
   11480 /* iNVM */
   11481 
   11482 static int
   11483 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11484 {
    11485 	int32_t  rv = -1;	/* fail unless the word is found */
   11486 	uint32_t invm_dword;
   11487 	uint16_t i;
   11488 	uint8_t record_type, word_address;
   11489 
   11490 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11491 		device_xname(sc->sc_dev), __func__));
   11492 
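          	/*
          	 * Walk the iNVM autoload records.  CSR and RSA-key records
          	 * carry multi-dword payloads, so skip over those while
          	 * searching for the word-autoload record that matches the
          	 * given address.
          	 */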
   11493 	for (i = 0; i < INVM_SIZE; i++) {
   11494 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11495 		/* Get record type */
   11496 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11497 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11498 			break;
   11499 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11500 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11501 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11502 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11503 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11504 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11505 			if (word_address == address) {
   11506 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11507 				rv = 0;
   11508 				break;
   11509 			}
   11510 		}
   11511 	}
   11512 
   11513 	return rv;
   11514 }
   11515 
   11516 static int
   11517 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11518 {
   11519 	int rv = 0;
   11520 	int i;
   11521 
   11522 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11523 		device_xname(sc->sc_dev), __func__));
   11524 
   11525 	for (i = 0; i < words; i++) {
   11526 		switch (offset + i) {
   11527 		case NVM_OFF_MACADDR:
   11528 		case NVM_OFF_MACADDR1:
   11529 		case NVM_OFF_MACADDR2:
   11530 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11531 			if (rv != 0) {
   11532 				data[i] = 0xffff;
   11533 				rv = -1;
   11534 			}
   11535 			break;
   11536 		case NVM_OFF_CFG2:
   11537 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11538 			if (rv != 0) {
   11539 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11540 				rv = 0;
   11541 			}
   11542 			break;
   11543 		case NVM_OFF_CFG4:
   11544 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11545 			if (rv != 0) {
   11546 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11547 				rv = 0;
   11548 			}
   11549 			break;
   11550 		case NVM_OFF_LED_1_CFG:
   11551 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11552 			if (rv != 0) {
   11553 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11554 				rv = 0;
   11555 			}
   11556 			break;
   11557 		case NVM_OFF_LED_0_2_CFG:
   11558 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11559 			if (rv != 0) {
   11560 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11561 				rv = 0;
   11562 			}
   11563 			break;
   11564 		case NVM_OFF_ID_LED_SETTINGS:
   11565 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11566 			if (rv != 0) {
   11567 				*data = ID_LED_RESERVED_FFFF;
   11568 				rv = 0;
   11569 			}
   11570 			break;
   11571 		default:
   11572 			DPRINTF(WM_DEBUG_NVM,
   11573 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11574 			*data = NVM_RESERVED_WORD;
   11575 			break;
   11576 		}
   11577 	}
   11578 
   11579 	return rv;
   11580 }
   11581 
    11582 /* Locking, NVM type detection, checksum validation, version check and read */
   11583 
   11584 /*
   11585  * wm_nvm_acquire:
   11586  *
   11587  *	Perform the EEPROM handshake required on some chips.
   11588  */
   11589 static int
   11590 wm_nvm_acquire(struct wm_softc *sc)
   11591 {
   11592 	uint32_t reg;
   11593 	int x;
   11594 	int ret = 0;
   11595 
   11596 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11597 		device_xname(sc->sc_dev), __func__));
   11598 
   11599 	if (sc->sc_type >= WM_T_ICH8) {
   11600 		ret = wm_get_nvm_ich8lan(sc);
   11601 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11602 		ret = wm_get_swfwhw_semaphore(sc);
   11603 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11604 		/* This will also do wm_get_swsm_semaphore() if needed */
   11605 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11606 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11607 		ret = wm_get_swsm_semaphore(sc);
   11608 	}
   11609 
   11610 	if (ret) {
   11611 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11612 			__func__);
   11613 		return 1;
   11614 	}
   11615 
   11616 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11617 		reg = CSR_READ(sc, WMREG_EECD);
   11618 
   11619 		/* Request EEPROM access. */
   11620 		reg |= EECD_EE_REQ;
   11621 		CSR_WRITE(sc, WMREG_EECD, reg);
   11622 
    11623 		/* ...and wait for it to be granted. */
   11624 		for (x = 0; x < 1000; x++) {
   11625 			reg = CSR_READ(sc, WMREG_EECD);
   11626 			if (reg & EECD_EE_GNT)
   11627 				break;
   11628 			delay(5);
   11629 		}
   11630 		if ((reg & EECD_EE_GNT) == 0) {
   11631 			aprint_error_dev(sc->sc_dev,
   11632 			    "could not acquire EEPROM GNT\n");
   11633 			reg &= ~EECD_EE_REQ;
   11634 			CSR_WRITE(sc, WMREG_EECD, reg);
   11635 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11636 				wm_put_swfwhw_semaphore(sc);
   11637 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11638 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11639 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11640 				wm_put_swsm_semaphore(sc);
   11641 			return 1;
   11642 		}
   11643 	}
   11644 
   11645 	return 0;
   11646 }
   11647 
   11648 /*
   11649  * wm_nvm_release:
   11650  *
   11651  *	Release the EEPROM mutex.
   11652  */
   11653 static void
   11654 wm_nvm_release(struct wm_softc *sc)
   11655 {
   11656 	uint32_t reg;
   11657 
   11658 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11659 		device_xname(sc->sc_dev), __func__));
   11660 
   11661 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11662 		reg = CSR_READ(sc, WMREG_EECD);
   11663 		reg &= ~EECD_EE_REQ;
   11664 		CSR_WRITE(sc, WMREG_EECD, reg);
   11665 	}
   11666 
   11667 	if (sc->sc_type >= WM_T_ICH8) {
   11668 		wm_put_nvm_ich8lan(sc);
   11669 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11670 		wm_put_swfwhw_semaphore(sc);
   11671 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11672 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11673 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11674 		wm_put_swsm_semaphore(sc);
   11675 }
   11676 
   11677 static int
   11678 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11679 {
   11680 	uint32_t eecd = 0;
   11681 
   11682 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11683 	    || sc->sc_type == WM_T_82583) {
   11684 		eecd = CSR_READ(sc, WMREG_EECD);
   11685 
   11686 		/* Isolate bits 15 & 16 */
   11687 		eecd = ((eecd >> 15) & 0x03);
   11688 
   11689 		/* If both bits are set, device is Flash type */
   11690 		if (eecd == 0x03)
   11691 			return 0;
   11692 	}
   11693 	return 1;
   11694 }
   11695 
   11696 static int
   11697 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11698 {
   11699 	uint32_t eec;
   11700 
   11701 	eec = CSR_READ(sc, WMREG_EEC);
   11702 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11703 		return 1;
   11704 
   11705 	return 0;
   11706 }
   11707 
   11708 /*
   11709  * wm_nvm_validate_checksum
   11710  *
    11711  * The sum of the first 64 (16 bit) words must equal NVM_CHECKSUM.
   11712  */
   11713 static int
   11714 wm_nvm_validate_checksum(struct wm_softc *sc)
   11715 {
   11716 	uint16_t checksum;
   11717 	uint16_t eeprom_data;
   11718 #ifdef WM_DEBUG
   11719 	uint16_t csum_wordaddr, valid_checksum;
   11720 #endif
   11721 	int i;
   11722 
   11723 	checksum = 0;
   11724 
   11725 	/* Don't check for I211 */
   11726 	if (sc->sc_type == WM_T_I211)
   11727 		return 0;
   11728 
   11729 #ifdef WM_DEBUG
   11730 	if (sc->sc_type == WM_T_PCH_LPT) {
   11731 		csum_wordaddr = NVM_OFF_COMPAT;
   11732 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11733 	} else {
   11734 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11735 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11736 	}
   11737 
   11738 	/* Dump EEPROM image for debug */
   11739 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11740 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11741 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11742 		/* XXX PCH_SPT? */
   11743 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11744 		if ((eeprom_data & valid_checksum) == 0) {
   11745 			DPRINTF(WM_DEBUG_NVM,
    11746 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   11747 				device_xname(sc->sc_dev), eeprom_data,
   11748 				    valid_checksum));
   11749 		}
   11750 	}
   11751 
   11752 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11753 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11754 		for (i = 0; i < NVM_SIZE; i++) {
   11755 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11756 				printf("XXXX ");
   11757 			else
   11758 				printf("%04hx ", eeprom_data);
   11759 			if (i % 8 == 7)
   11760 				printf("\n");
   11761 		}
   11762 	}
   11763 
   11764 #endif /* WM_DEBUG */
   11765 
   11766 	for (i = 0; i < NVM_SIZE; i++) {
   11767 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11768 			return 1;
   11769 		checksum += eeprom_data;
   11770 	}
   11771 
   11772 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11773 #ifdef WM_DEBUG
   11774 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11775 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11776 #endif
   11777 	}
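          	/* Note: a checksum mismatch is only logged; we don't fail. */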
   11778 
   11779 	return 0;
   11780 }
   11781 
   11782 static void
   11783 wm_nvm_version_invm(struct wm_softc *sc)
   11784 {
   11785 	uint32_t dword;
   11786 
   11787 	/*
    11788 	 * Linux's code for decoding the version is very strange, so we
    11789 	 * don't follow that algorithm and just use word 61 as documented.
    11790 	 * Perhaps it's not perfect, though...
   11791 	 *
   11792 	 * Example:
   11793 	 *
   11794 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11795 	 */
   11796 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11797 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11798 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11799 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11800 }
   11801 
   11802 static void
   11803 wm_nvm_version(struct wm_softc *sc)
   11804 {
   11805 	uint16_t major, minor, build, patch;
   11806 	uint16_t uid0, uid1;
   11807 	uint16_t nvm_data;
   11808 	uint16_t off;
   11809 	bool check_version = false;
   11810 	bool check_optionrom = false;
   11811 	bool have_build = false;
   11812 
   11813 	/*
   11814 	 * Version format:
   11815 	 *
   11816 	 * XYYZ
   11817 	 * X0YZ
   11818 	 * X0YY
   11819 	 *
   11820 	 * Example:
   11821 	 *
   11822 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11823 	 *	82571	0x50a6	5.10.6?
   11824 	 *	82572	0x506a	5.6.10?
   11825 	 *	82572EI	0x5069	5.6.9?
   11826 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11827 	 *		0x2013	2.1.3?
    11828 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   11829 	 */
   11830 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11831 	switch (sc->sc_type) {
   11832 	case WM_T_82571:
   11833 	case WM_T_82572:
   11834 	case WM_T_82574:
   11835 	case WM_T_82583:
   11836 		check_version = true;
   11837 		check_optionrom = true;
   11838 		have_build = true;
   11839 		break;
   11840 	case WM_T_82575:
   11841 	case WM_T_82576:
   11842 	case WM_T_82580:
   11843 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11844 			check_version = true;
   11845 		break;
   11846 	case WM_T_I211:
   11847 		wm_nvm_version_invm(sc);
   11848 		goto printver;
   11849 	case WM_T_I210:
   11850 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11851 			wm_nvm_version_invm(sc);
   11852 			goto printver;
   11853 		}
   11854 		/* FALLTHROUGH */
   11855 	case WM_T_I350:
   11856 	case WM_T_I354:
   11857 		check_version = true;
   11858 		check_optionrom = true;
   11859 		break;
   11860 	default:
   11861 		return;
   11862 	}
   11863 	if (check_version) {
   11864 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11865 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11866 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11867 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11868 			build = nvm_data & NVM_BUILD_MASK;
   11869 			have_build = true;
   11870 		} else
   11871 			minor = nvm_data & 0x00ff;
   11872 
    11873 		/* The minor is BCD-coded; convert it to decimal (e.g. 0x10 -> 10) */
   11874 		minor = (minor / 16) * 10 + (minor % 16);
   11875 		sc->sc_nvm_ver_major = major;
   11876 		sc->sc_nvm_ver_minor = minor;
   11877 
   11878 printver:
   11879 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11880 		    sc->sc_nvm_ver_minor);
   11881 		if (have_build) {
   11882 			sc->sc_nvm_ver_build = build;
   11883 			aprint_verbose(".%d", build);
   11884 		}
   11885 	}
   11886 	if (check_optionrom) {
   11887 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11888 		/* Option ROM Version */
   11889 		if ((off != 0x0000) && (off != 0xffff)) {
   11890 			off += NVM_COMBO_VER_OFF;
   11891 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11892 			wm_nvm_read(sc, off, 1, &uid0);
   11893 			if ((uid0 != 0) && (uid0 != 0xffff)
   11894 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11895 				/* 16bits */
   11896 				major = uid0 >> 8;
   11897 				build = (uid0 << 8) | (uid1 >> 8);
   11898 				patch = uid1 & 0x00ff;
   11899 				aprint_verbose(", option ROM Version %d.%d.%d",
   11900 				    major, build, patch);
   11901 			}
   11902 		}
   11903 	}
   11904 
   11905 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11906 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11907 }
   11908 
   11909 /*
   11910  * wm_nvm_read:
   11911  *
   11912  *	Read data from the serial EEPROM.
   11913  */
   11914 static int
   11915 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11916 {
   11917 	int rv;
   11918 
   11919 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11920 		device_xname(sc->sc_dev), __func__));
   11921 
   11922 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11923 		return 1;
   11924 
   11925 	if (wm_nvm_acquire(sc))
   11926 		return 1;
   11927 
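          	/* Dispatch on the NVM access method, most specific first. */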
   11928 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11929 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11930 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11931 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11932 	else if (sc->sc_type == WM_T_PCH_SPT)
   11933 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11934 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11935 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11936 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11937 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11938 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11939 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11940 	else
   11941 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11942 
   11943 	wm_nvm_release(sc);
   11944 	return rv;
   11945 }
   11946 
   11947 /*
   11948  * Hardware semaphores.
    11949  * Very complex...
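           *
           * A rough map: SWSM (SMBI/SWESMBI) is the classic two-stage
           * hardware semaphore; SW_FW_SYNC adds per-resource software and
           * firmware ownership bits (taken under SWSM); ICH/PCH parts use
           * the EXTCNFCTR MDIO ownership bit plus driver mutexes for PHY
           * and NVM access.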
   11950  */
   11951 
   11952 static int
   11953 wm_get_null(struct wm_softc *sc)
   11954 {
   11955 
   11956 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11957 		device_xname(sc->sc_dev), __func__));
   11958 	return 0;
   11959 }
   11960 
   11961 static void
   11962 wm_put_null(struct wm_softc *sc)
   11963 {
   11964 
   11965 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11966 		device_xname(sc->sc_dev), __func__));
   11967 	return;
   11968 }
   11969 
   11970 /*
   11971  * Get hardware semaphore.
   11972  * Same as e1000_get_hw_semaphore_generic()
   11973  */
   11974 static int
   11975 wm_get_swsm_semaphore(struct wm_softc *sc)
   11976 {
   11977 	int32_t timeout;
   11978 	uint32_t swsm;
   11979 
   11980 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11981 		device_xname(sc->sc_dev), __func__));
   11982 	KASSERT(sc->sc_nvm_wordsize > 0);
   11983 
   11984 	/* Get the SW semaphore. */
   11985 	timeout = sc->sc_nvm_wordsize + 1;
   11986 	while (timeout) {
   11987 		swsm = CSR_READ(sc, WMREG_SWSM);
   11988 
   11989 		if ((swsm & SWSM_SMBI) == 0)
   11990 			break;
   11991 
   11992 		delay(50);
   11993 		timeout--;
   11994 	}
   11995 
   11996 	if (timeout == 0) {
   11997 		aprint_error_dev(sc->sc_dev,
   11998 		    "could not acquire SWSM SMBI\n");
   11999 		return 1;
   12000 	}
   12001 
   12002 	/* Get the FW semaphore. */
   12003 	timeout = sc->sc_nvm_wordsize + 1;
   12004 	while (timeout) {
   12005 		swsm = CSR_READ(sc, WMREG_SWSM);
   12006 		swsm |= SWSM_SWESMBI;
   12007 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12008 		/* If we managed to set the bit we got the semaphore. */
   12009 		swsm = CSR_READ(sc, WMREG_SWSM);
   12010 		if (swsm & SWSM_SWESMBI)
   12011 			break;
   12012 
   12013 		delay(50);
   12014 		timeout--;
   12015 	}
   12016 
   12017 	if (timeout == 0) {
   12018 		aprint_error_dev(sc->sc_dev,
   12019 		    "could not acquire SWSM SWESMBI\n");
   12020 		/* Release semaphores */
   12021 		wm_put_swsm_semaphore(sc);
   12022 		return 1;
   12023 	}
   12024 	return 0;
   12025 }
   12026 
   12027 /*
   12028  * Put hardware semaphore.
   12029  * Same as e1000_put_hw_semaphore_generic()
   12030  */
   12031 static void
   12032 wm_put_swsm_semaphore(struct wm_softc *sc)
   12033 {
   12034 	uint32_t swsm;
   12035 
   12036 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12037 		device_xname(sc->sc_dev), __func__));
   12038 
   12039 	swsm = CSR_READ(sc, WMREG_SWSM);
   12040 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12041 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12042 }
   12043 
   12044 /*
   12045  * Get SW/FW semaphore.
   12046  * Same as e1000_acquire_swfw_sync_82575().
   12047  */
   12048 static int
   12049 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12050 {
   12051 	uint32_t swfw_sync;
   12052 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12053 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    12054 	int timeout;
   12055 
   12056 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12057 		device_xname(sc->sc_dev), __func__));
   12058 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12059 
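          	/*
          	 * Each resource has a software bit and a firmware bit in
          	 * SW_FW_SYNC.  We may claim a resource only while neither
          	 * bit is set, and the register is updated while holding the
          	 * SWSM semaphore.
          	 */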
   12060 	for (timeout = 0; timeout < 200; timeout++) {
   12061 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12062 			if (wm_get_swsm_semaphore(sc)) {
   12063 				aprint_error_dev(sc->sc_dev,
   12064 				    "%s: failed to get semaphore\n",
   12065 				    __func__);
   12066 				return 1;
   12067 			}
   12068 		}
   12069 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12070 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12071 			swfw_sync |= swmask;
   12072 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12073 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12074 				wm_put_swsm_semaphore(sc);
   12075 			return 0;
   12076 		}
   12077 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12078 			wm_put_swsm_semaphore(sc);
   12079 		delay(5000);
   12080 	}
   12081 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12082 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12083 	return 1;
   12084 }
   12085 
   12086 static void
   12087 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12088 {
   12089 	uint32_t swfw_sync;
   12090 
   12091 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12092 		device_xname(sc->sc_dev), __func__));
   12093 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12094 
   12095 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12096 		while (wm_get_swsm_semaphore(sc) != 0)
   12097 			continue;
   12098 	}
   12099 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12100 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12101 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12102 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12103 		wm_put_swsm_semaphore(sc);
   12104 }
   12105 
   12106 static int
   12107 wm_get_phy_82575(struct wm_softc *sc)
   12108 {
   12109 
   12110 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12111 		device_xname(sc->sc_dev), __func__));
   12112 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12113 }
   12114 
   12115 static void
   12116 wm_put_phy_82575(struct wm_softc *sc)
   12117 {
   12118 
   12119 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12120 		device_xname(sc->sc_dev), __func__));
   12121 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12122 }
   12123 
   12124 static int
   12125 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12126 {
   12127 	uint32_t ext_ctrl;
    12128 	int timeout;
   12129 
   12130 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12131 		device_xname(sc->sc_dev), __func__));
   12132 
   12133 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
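          	/* Poll up to 200 * 5ms = 1s for the ownership bit to stick. */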
   12134 	for (timeout = 0; timeout < 200; timeout++) {
   12135 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12136 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12137 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12138 
   12139 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12140 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12141 			return 0;
   12142 		delay(5000);
   12143 	}
   12144 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12145 	    device_xname(sc->sc_dev), ext_ctrl);
   12146 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12147 	return 1;
   12148 }
   12149 
   12150 static void
   12151 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12152 {
   12153 	uint32_t ext_ctrl;
   12154 
   12155 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12156 		device_xname(sc->sc_dev), __func__));
   12157 
   12158 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12159 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12160 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12161 
   12162 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12163 }
   12164 
   12165 static int
   12166 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12167 {
   12168 	uint32_t ext_ctrl;
   12169 	int timeout;
   12170 
   12171 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12172 		device_xname(sc->sc_dev), __func__));
   12173 	mutex_enter(sc->sc_ich_phymtx);
   12174 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12175 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12176 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12177 			break;
   12178 		delay(1000);
   12179 	}
   12180 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12181 		printf("%s: SW has already locked the resource\n",
   12182 		    device_xname(sc->sc_dev));
   12183 		goto out;
   12184 	}
   12185 
   12186 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12187 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12188 	for (timeout = 0; timeout < 1000; timeout++) {
   12189 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12190 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12191 			break;
   12192 		delay(1000);
   12193 	}
   12194 	if (timeout >= 1000) {
   12195 		printf("%s: failed to acquire semaphore\n",
   12196 		    device_xname(sc->sc_dev));
   12197 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12198 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12199 		goto out;
   12200 	}
   12201 	return 0;
   12202 
   12203 out:
   12204 	mutex_exit(sc->sc_ich_phymtx);
   12205 	return 1;
   12206 }
   12207 
   12208 static void
   12209 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12210 {
   12211 	uint32_t ext_ctrl;
   12212 
   12213 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12214 		device_xname(sc->sc_dev), __func__));
   12215 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12216 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12217 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12218 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12219 	} else {
   12220 		printf("%s: Semaphore unexpectedly released\n",
   12221 		    device_xname(sc->sc_dev));
   12222 	}
   12223 
   12224 	mutex_exit(sc->sc_ich_phymtx);
   12225 }
   12226 
   12227 static int
   12228 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12229 {
   12230 
   12231 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12232 		device_xname(sc->sc_dev), __func__));
   12233 	mutex_enter(sc->sc_ich_nvmmtx);
   12234 
   12235 	return 0;
   12236 }
   12237 
   12238 static void
   12239 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12240 {
   12241 
   12242 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12243 		device_xname(sc->sc_dev), __func__));
   12244 	mutex_exit(sc->sc_ich_nvmmtx);
   12245 }
   12246 
   12247 static int
   12248 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12249 {
   12250 	int i = 0;
   12251 	uint32_t reg;
   12252 
   12253 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12254 		device_xname(sc->sc_dev), __func__));
   12255 
   12256 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12257 	do {
   12258 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12259 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12260 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12261 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12262 			break;
   12263 		delay(2*1000);
   12264 		i++;
   12265 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12266 
   12267 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12268 		wm_put_hw_semaphore_82573(sc);
   12269 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12270 		    device_xname(sc->sc_dev));
   12271 		return -1;
   12272 	}
   12273 
   12274 	return 0;
   12275 }
   12276 
   12277 static void
   12278 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12279 {
   12280 	uint32_t reg;
   12281 
   12282 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12283 		device_xname(sc->sc_dev), __func__));
   12284 
   12285 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12286 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12287 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12288 }
   12289 
   12290 /*
   12291  * Management mode and power management related subroutines.
   12292  * BMC, AMT, suspend/resume and EEE.
   12293  */
   12294 
   12295 #ifdef WM_WOL
   12296 static int
   12297 wm_check_mng_mode(struct wm_softc *sc)
   12298 {
   12299 	int rv;
   12300 
   12301 	switch (sc->sc_type) {
   12302 	case WM_T_ICH8:
   12303 	case WM_T_ICH9:
   12304 	case WM_T_ICH10:
   12305 	case WM_T_PCH:
   12306 	case WM_T_PCH2:
   12307 	case WM_T_PCH_LPT:
   12308 	case WM_T_PCH_SPT:
   12309 		rv = wm_check_mng_mode_ich8lan(sc);
   12310 		break;
   12311 	case WM_T_82574:
   12312 	case WM_T_82583:
   12313 		rv = wm_check_mng_mode_82574(sc);
   12314 		break;
   12315 	case WM_T_82571:
   12316 	case WM_T_82572:
   12317 	case WM_T_82573:
   12318 	case WM_T_80003:
   12319 		rv = wm_check_mng_mode_generic(sc);
   12320 		break;
   12321 	default:
    12322 		/* nothing to do */
   12323 		rv = 0;
   12324 		break;
   12325 	}
   12326 
   12327 	return rv;
   12328 }
   12329 
   12330 static int
   12331 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12332 {
   12333 	uint32_t fwsm;
   12334 
   12335 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12336 
   12337 	if (((fwsm & FWSM_FW_VALID) != 0)
   12338 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12339 		return 1;
   12340 
   12341 	return 0;
   12342 }
   12343 
   12344 static int
   12345 wm_check_mng_mode_82574(struct wm_softc *sc)
   12346 {
   12347 	uint16_t data;
   12348 
   12349 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12350 
   12351 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12352 		return 1;
   12353 
   12354 	return 0;
   12355 }
   12356 
   12357 static int
   12358 wm_check_mng_mode_generic(struct wm_softc *sc)
   12359 {
   12360 	uint32_t fwsm;
   12361 
   12362 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12363 
   12364 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12365 		return 1;
   12366 
   12367 	return 0;
   12368 }
   12369 #endif /* WM_WOL */
   12370 
   12371 static int
   12372 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12373 {
   12374 	uint32_t manc, fwsm, factps;
   12375 
   12376 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12377 		return 0;
   12378 
   12379 	manc = CSR_READ(sc, WMREG_MANC);
   12380 
   12381 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12382 		device_xname(sc->sc_dev), manc));
   12383 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12384 		return 0;
   12385 
   12386 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12387 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12388 		factps = CSR_READ(sc, WMREG_FACTPS);
   12389 		if (((factps & FACTPS_MNGCG) == 0)
   12390 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12391 			return 1;
    12392 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   12393 		uint16_t data;
   12394 
   12395 		factps = CSR_READ(sc, WMREG_FACTPS);
   12396 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12397 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12398 			device_xname(sc->sc_dev), factps, data));
   12399 		if (((factps & FACTPS_MNGCG) == 0)
   12400 		    && ((data & NVM_CFG2_MNGM_MASK)
   12401 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12402 			return 1;
   12403 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12404 	    && ((manc & MANC_ASF_EN) == 0))
   12405 		return 1;
   12406 
   12407 	return 0;
   12408 }
   12409 
   12410 static bool
   12411 wm_phy_resetisblocked(struct wm_softc *sc)
   12412 {
   12413 	bool blocked = false;
   12414 	uint32_t reg;
   12415 	int i = 0;
   12416 
   12417 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12418 		device_xname(sc->sc_dev), __func__));
   12419 
   12420 	switch (sc->sc_type) {
   12421 	case WM_T_ICH8:
   12422 	case WM_T_ICH9:
   12423 	case WM_T_ICH10:
   12424 	case WM_T_PCH:
   12425 	case WM_T_PCH2:
   12426 	case WM_T_PCH_LPT:
   12427 	case WM_T_PCH_SPT:
   12428 		do {
   12429 			reg = CSR_READ(sc, WMREG_FWSM);
   12430 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12431 				blocked = true;
   12432 				delay(10*1000);
   12433 				continue;
   12434 			}
   12435 			blocked = false;
   12436 		} while (blocked && (i++ < 30));
   12437 		return blocked;
   12438 		break;
   12439 	case WM_T_82571:
   12440 	case WM_T_82572:
   12441 	case WM_T_82573:
   12442 	case WM_T_82574:
   12443 	case WM_T_82583:
   12444 	case WM_T_80003:
   12445 		reg = CSR_READ(sc, WMREG_MANC);
   12446 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12447 			return true;
   12448 		else
   12449 			return false;
   12450 		break;
   12451 	default:
   12452 		/* no problem */
   12453 		break;
   12454 	}
   12455 
   12456 	return false;
   12457 }
   12458 
   12459 static void
   12460 wm_get_hw_control(struct wm_softc *sc)
   12461 {
   12462 	uint32_t reg;
   12463 
   12464 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12465 		device_xname(sc->sc_dev), __func__));
   12466 
   12467 	if (sc->sc_type == WM_T_82573) {
   12468 		reg = CSR_READ(sc, WMREG_SWSM);
   12469 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12470 	} else if (sc->sc_type >= WM_T_82571) {
   12471 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12472 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12473 	}
   12474 }
   12475 
   12476 static void
   12477 wm_release_hw_control(struct wm_softc *sc)
   12478 {
   12479 	uint32_t reg;
   12480 
   12481 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12482 		device_xname(sc->sc_dev), __func__));
   12483 
   12484 	if (sc->sc_type == WM_T_82573) {
   12485 		reg = CSR_READ(sc, WMREG_SWSM);
   12486 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12487 	} else if (sc->sc_type >= WM_T_82571) {
   12488 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12489 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12490 	}
   12491 }
   12492 
   12493 static void
   12494 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12495 {
   12496 	uint32_t reg;
   12497 
   12498 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12499 		device_xname(sc->sc_dev), __func__));
   12500 
   12501 	if (sc->sc_type < WM_T_PCH2)
   12502 		return;
   12503 
   12504 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12505 
   12506 	if (gate)
   12507 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12508 	else
   12509 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12510 
   12511 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12512 }
   12513 
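          /*
           * wm_smbustopci:
           *
           *	Switch the PHY interface from SMBus mode back to PCIe/MDIO
           *	so that the PHY registers are accessible again, toggling
           *	the LANPHYPC pin when needed.
           */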
   12514 static void
   12515 wm_smbustopci(struct wm_softc *sc)
   12516 {
   12517 	uint32_t fwsm, reg;
   12518 	int rv = 0;
   12519 
   12520 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12521 		device_xname(sc->sc_dev), __func__));
   12522 
   12523 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12524 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12525 
   12526 	/* Disable ULP */
   12527 	wm_ulp_disable(sc);
   12528 
   12529 	/* Acquire PHY semaphore */
   12530 	sc->phy.acquire(sc);
   12531 
   12532 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12533 	switch (sc->sc_type) {
   12534 	case WM_T_PCH_LPT:
   12535 	case WM_T_PCH_SPT:
   12536 		if (wm_phy_is_accessible_pchlan(sc))
   12537 			break;
   12538 
   12539 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12540 		reg |= CTRL_EXT_FORCE_SMBUS;
   12541 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12542 #if 0
   12543 		/* XXX Isn't this required??? */
   12544 		CSR_WRITE_FLUSH(sc);
   12545 #endif
   12546 		delay(50 * 1000);
   12547 		/* FALLTHROUGH */
   12548 	case WM_T_PCH2:
   12549 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12550 			break;
   12551 		/* FALLTHROUGH */
   12552 	case WM_T_PCH:
   12553 		if (sc->sc_type == WM_T_PCH)
   12554 			if ((fwsm & FWSM_FW_VALID) != 0)
   12555 				break;
   12556 
   12557 		if (wm_phy_resetisblocked(sc) == true) {
   12558 			printf("XXX reset is blocked(3)\n");
   12559 			break;
   12560 		}
   12561 
   12562 		wm_toggle_lanphypc_pch_lpt(sc);
   12563 
   12564 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12565 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12566 				break;
   12567 
   12568 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12569 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12570 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12571 
   12572 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12573 				break;
   12574 			rv = -1;
   12575 		}
   12576 		break;
   12577 	default:
   12578 		break;
   12579 	}
   12580 
   12581 	/* Release semaphore */
   12582 	sc->phy.release(sc);
   12583 
   12584 	if (rv == 0) {
   12585 		if (wm_phy_resetisblocked(sc)) {
   12586 			printf("XXX reset is blocked(4)\n");
   12587 			goto out;
   12588 		}
   12589 		wm_reset_phy(sc);
   12590 		if (wm_phy_resetisblocked(sc))
   12591 			printf("XXX reset is blocked(4)\n");
   12592 	}
   12593 
   12594 out:
   12595 	/*
   12596 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12597 	 */
   12598 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12599 		delay(10*1000);
   12600 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12601 	}
   12602 }
   12603 
   12604 static void
   12605 wm_init_manageability(struct wm_softc *sc)
   12606 {
   12607 
   12608 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12609 		device_xname(sc->sc_dev), __func__));
   12610 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12611 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12612 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12613 
   12614 		/* Disable hardware interception of ARP */
   12615 		manc &= ~MANC_ARP_EN;
   12616 
   12617 		/* Enable receiving management packets to the host */
   12618 		if (sc->sc_type >= WM_T_82571) {
   12619 			manc |= MANC_EN_MNG2HOST;
    12620 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12621 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12622 		}
   12623 
   12624 		CSR_WRITE(sc, WMREG_MANC, manc);
   12625 	}
   12626 }
   12627 
   12628 static void
   12629 wm_release_manageability(struct wm_softc *sc)
   12630 {
   12631 
   12632 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12633 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12634 
   12635 		manc |= MANC_ARP_EN;
   12636 		if (sc->sc_type >= WM_T_82571)
   12637 			manc &= ~MANC_EN_MNG2HOST;
   12638 
   12639 		CSR_WRITE(sc, WMREG_MANC, manc);
   12640 	}
   12641 }
   12642 
   12643 static void
   12644 wm_get_wakeup(struct wm_softc *sc)
   12645 {
   12646 
   12647 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12648 	switch (sc->sc_type) {
   12649 	case WM_T_82573:
   12650 	case WM_T_82583:
   12651 		sc->sc_flags |= WM_F_HAS_AMT;
   12652 		/* FALLTHROUGH */
   12653 	case WM_T_80003:
   12654 	case WM_T_82575:
   12655 	case WM_T_82576:
   12656 	case WM_T_82580:
   12657 	case WM_T_I350:
   12658 	case WM_T_I354:
   12659 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12660 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12661 		/* FALLTHROUGH */
   12662 	case WM_T_82541:
   12663 	case WM_T_82541_2:
   12664 	case WM_T_82547:
   12665 	case WM_T_82547_2:
   12666 	case WM_T_82571:
   12667 	case WM_T_82572:
   12668 	case WM_T_82574:
   12669 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12670 		break;
   12671 	case WM_T_ICH8:
   12672 	case WM_T_ICH9:
   12673 	case WM_T_ICH10:
   12674 	case WM_T_PCH:
   12675 	case WM_T_PCH2:
   12676 	case WM_T_PCH_LPT:
   12677 	case WM_T_PCH_SPT:
   12678 		sc->sc_flags |= WM_F_HAS_AMT;
   12679 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12680 		break;
   12681 	default:
   12682 		break;
   12683 	}
   12684 
   12685 	/* 1: HAS_MANAGE */
   12686 	if (wm_enable_mng_pass_thru(sc) != 0)
   12687 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12688 
   12689 #ifdef WM_DEBUG
   12690 	printf("\n");
   12691 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12692 		printf("HAS_AMT,");
   12693 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12694 		printf("ARC_SUBSYS_VALID,");
   12695 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12696 		printf("ASF_FIRMWARE_PRES,");
   12697 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12698 		printf("HAS_MANAGE,");
   12699 	printf("\n");
   12700 #endif
   12701 	/*
    12702 	 * Note that the WOL flags are set after the EEPROM settings
    12703 	 * have been reset.
   12704 	 */
   12705 }
   12706 
   12707 /*
   12708  * Unconfigure Ultra Low Power mode.
    12709  * Only for PCH_LPT and newer, excluding some I217/I218 parts (see below).
   12710  */
   12711 static void
   12712 wm_ulp_disable(struct wm_softc *sc)
   12713 {
   12714 	uint32_t reg;
   12715 	int i = 0;
   12716 
   12717 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12718 		device_xname(sc->sc_dev), __func__));
   12719 	/* Exclude old devices */
   12720 	if ((sc->sc_type < WM_T_PCH_LPT)
   12721 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12722 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12723 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   12724 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   12725 		return;
   12726 
   12727 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   12728 		/* Request ME un-configure ULP mode in the PHY */
   12729 		reg = CSR_READ(sc, WMREG_H2ME);
   12730 		reg &= ~H2ME_ULP;
   12731 		reg |= H2ME_ENFORCE_SETTINGS;
   12732 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12733 
   12734 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   12735 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   12736 			if (i++ == 30) {
   12737 				printf("%s timed out\n", __func__);
   12738 				return;
   12739 			}
   12740 			delay(10 * 1000);
   12741 		}
   12742 		reg = CSR_READ(sc, WMREG_H2ME);
   12743 		reg &= ~H2ME_ENFORCE_SETTINGS;
   12744 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12745 
   12746 		return;
   12747 	}
   12748 
   12749 	/* Acquire semaphore */
   12750 	sc->phy.acquire(sc);
   12751 
   12752 	/* Toggle LANPHYPC */
   12753 	wm_toggle_lanphypc_pch_lpt(sc);
   12754 
   12755 	/* Unforce SMBus mode in PHY */
   12756 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12757 	if (reg == 0x0000 || reg == 0xffff) {
   12758 		uint32_t reg2;
   12759 
   12760 		printf("%s: Force SMBus first.\n", __func__);
   12761 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   12762 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   12763 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   12764 		delay(50 * 1000);
   12765 
   12766 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12767 	}
   12768 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12769 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   12770 
   12771 	/* Unforce SMBus mode in MAC */
   12772 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12773 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   12774 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12775 
   12776 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   12777 	reg |= HV_PM_CTRL_K1_ENA;
   12778 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   12779 
   12780 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   12781 	reg &= ~(I218_ULP_CONFIG1_IND
   12782 	    | I218_ULP_CONFIG1_STICKY_ULP
   12783 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   12784 	    | I218_ULP_CONFIG1_WOL_HOST
   12785 	    | I218_ULP_CONFIG1_INBAND_EXIT
   12786 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   12787 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   12788 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   12789 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12790 	reg |= I218_ULP_CONFIG1_START;
   12791 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12792 
   12793 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   12794 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   12795 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   12796 
   12797 	/* Release semaphore */
   12798 	sc->phy.release(sc);
   12799 	wm_gmii_reset(sc);
   12800 	delay(50 * 1000);
   12801 }
   12802 
   12803 /* WOL in the newer chipset interfaces (pchlan) */
   12804 static void
   12805 wm_enable_phy_wakeup(struct wm_softc *sc)
   12806 {
   12807 #if 0
   12808 	uint16_t preg;
   12809 
   12810 	/* Copy MAC RARs to PHY RARs */
   12811 
   12812 	/* Copy MAC MTA to PHY MTA */
   12813 
   12814 	/* Configure PHY Rx Control register */
   12815 
   12816 	/* Enable PHY wakeup in MAC register */
   12817 
   12818 	/* Configure and enable PHY wakeup in PHY registers */
   12819 
   12820 	/* Activate PHY wakeup */
   12821 
   12822 	/* XXX */
   12823 #endif
   12824 }
   12825 
   12826 /* Power down workaround on D3 */
   12827 static void
   12828 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12829 {
   12830 	uint32_t reg;
   12831 	int i;
   12832 
   12833 	for (i = 0; i < 2; i++) {
   12834 		/* Disable link */
   12835 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12836 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12837 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12838 
   12839 		/*
   12840 		 * Call gig speed drop workaround on Gig disable before
   12841 		 * accessing any PHY registers
   12842 		 */
   12843 		if (sc->sc_type == WM_T_ICH8)
   12844 			wm_gig_downshift_workaround_ich8lan(sc);
   12845 
   12846 		/* Write VR power-down enable */
   12847 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12848 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12849 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12850 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12851 
   12852 		/* Read it back and test */
   12853 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12854 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12855 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12856 			break;
   12857 
   12858 		/* Issue PHY reset and repeat at most one more time */
   12859 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12860 	}
   12861 }
   12862 
   12863 static void
   12864 wm_enable_wakeup(struct wm_softc *sc)
   12865 {
   12866 	uint32_t reg, pmreg;
   12867 	pcireg_t pmode;
   12868 
   12869 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12870 		device_xname(sc->sc_dev), __func__));
   12871 
   12872 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12873 		&pmreg, NULL) == 0)
   12874 		return;
   12875 
   12876 	/* Advertise the wakeup capability */
   12877 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12878 	    | CTRL_SWDPIN(3));
   12879 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12880 
   12881 	/* ICH workaround */
   12882 	switch (sc->sc_type) {
   12883 	case WM_T_ICH8:
   12884 	case WM_T_ICH9:
   12885 	case WM_T_ICH10:
   12886 	case WM_T_PCH:
   12887 	case WM_T_PCH2:
   12888 	case WM_T_PCH_LPT:
   12889 	case WM_T_PCH_SPT:
   12890 		/* Disable gig during WOL */
   12891 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12892 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12893 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12894 		if (sc->sc_type == WM_T_PCH)
   12895 			wm_gmii_reset(sc);
   12896 
   12897 		/* Power down workaround */
   12898 		if (sc->sc_phytype == WMPHY_82577) {
   12899 			struct mii_softc *child;
   12900 
   12901 			/* Assume that the PHY is copper */
   12902 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
    12903 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   12904 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12905 				    (768 << 5) | 25, 0x0444); /* magic num */
   12906 		}
   12907 		break;
   12908 	default:
   12909 		break;
   12910 	}
   12911 
   12912 	/* Keep the laser running on fiber adapters */
   12913 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12914 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12915 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12916 		reg |= CTRL_EXT_SWDPIN(3);
   12917 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12918 	}
   12919 
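          	/* Arm the magic-packet wakeup filter. */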
   12920 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12921 #if 0	/* for the multicast packet */
   12922 	reg |= WUFC_MC;
   12923 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12924 #endif
   12925 
   12926 	if (sc->sc_type >= WM_T_PCH)
   12927 		wm_enable_phy_wakeup(sc);
   12928 	else {
   12929 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   12930 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12931 	}
   12932 
   12933 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12934 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12935 		|| (sc->sc_type == WM_T_PCH2))
   12936 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12937 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12938 
   12939 	/* Request PME */
   12940 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12941 #if 0
   12942 	/* Disable WOL */
   12943 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12944 #else
   12945 	/* For WOL */
   12946 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12947 #endif
   12948 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12949 }
   12950 
    12951 /* LPLU (Low Power Link Up) */
   12952 
   12953 static void
   12954 wm_lplu_d0_disable(struct wm_softc *sc)
   12955 {
   12956 	uint32_t reg;
   12957 
   12958 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12959 		device_xname(sc->sc_dev), __func__));
   12960 
   12961 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12962 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12963 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12964 }
   12965 
   12966 static void
   12967 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12968 {
   12969 	uint32_t reg;
   12970 
   12971 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12972 		device_xname(sc->sc_dev), __func__));
   12973 
   12974 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12975 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12976 	reg |= HV_OEM_BITS_ANEGNOW;
   12977 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12978 }
   12979 
    12980 /* EEE (Energy Efficient Ethernet) */
   12981 
   12982 static void
   12983 wm_set_eee_i350(struct wm_softc *sc)
   12984 {
   12985 	uint32_t ipcnfg, eeer;
   12986 
   12987 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12988 	eeer = CSR_READ(sc, WMREG_EEER);
   12989 
   12990 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12991 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12992 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12993 		    | EEER_LPI_FC);
   12994 	} else {
   12995 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12996 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12997 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12998 		    | EEER_LPI_FC);
   12999 	}
   13000 
   13001 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13002 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13003 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13004 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13005 }
   13006 
   13007 /*
   13008  * Workarounds (mainly PHY related).
    13009  * Normally, PHY workarounds live in the PHY drivers.
   13010  */
   13011 
   13012 /* Work-around for 82566 Kumeran PCS lock loss */
   13013 static void
   13014 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13015 {
   13016 #if 0
   13017 	int miistatus, active, i;
   13018 	int reg;
   13019 
   13020 	miistatus = sc->sc_mii.mii_media_status;
   13021 
   13022 	/* If the link is not up, do nothing */
   13023 	if ((miistatus & IFM_ACTIVE) == 0)
   13024 		return;
   13025 
   13026 	active = sc->sc_mii.mii_media_active;
   13027 
   13028 	/* Nothing to do if the link is other than 1Gbps */
   13029 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13030 		return;
   13031 
   13032 	for (i = 0; i < 10; i++) {
   13033 		/* read twice */
   13034 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13035 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13036 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13037 			goto out;	/* GOOD! */
   13038 
   13039 		/* Reset the PHY */
   13040 		wm_gmii_reset(sc);
   13041 		delay(5*1000);
   13042 	}
   13043 
   13044 	/* Disable GigE link negotiation */
   13045 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13046 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13047 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13048 
   13049 	/*
   13050 	 * Call gig speed drop workaround on Gig disable before accessing
   13051 	 * any PHY registers.
   13052 	 */
   13053 	wm_gig_downshift_workaround_ich8lan(sc);
   13054 
   13055 out:
   13056 	return;
   13057 #endif
   13058 }
   13059 
   13060 /* WOL from S5 stops working */
   13061 static void
   13062 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13063 {
   13064 	uint16_t kmrn_reg;
   13065 
   13066 	/* Only for igp3 */
   13067 	if (sc->sc_phytype == WMPHY_IGP_3) {
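          		/*
          		 * Pulse the Kumeran near-end loopback diagnostic bit
          		 * (set it, then clear it) as the workaround.
          		 */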
		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
	}
}

/*
 * Workaround for PCH PHYs.
 * XXX should be moved to a new PHY driver?
 */
static void
wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH);

	if (sc->sc_phytype == WMPHY_82577)
		wm_set_mdio_slow_mode_hv(sc);

	/*
	 * XXX Not yet implemented:
	 * - (PCH rev. 2) && (82577 && (phy rev 2 or 3))
	 * - (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1)
	 */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		/*
		 * Return registers to default by doing a soft reset then
		 * writing 0x3140 to the control register
		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
		 */
		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
			PHY_RESET(child);
			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
			    0x3140);
		}
	}

	/* Select page 0 */
	sc->phy.acquire(sc);
	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	sc->phy.release(sc);

	/*
	 * Configure the K1 Si workaround during phy reset assuming there is
	 * link so that it disables K1 if link is in 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	KASSERT(sc->sc_type == WM_T_PCH2);

	wm_set_mdio_slow_mode_hv(sc);
}

static int
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (sc->phy.acquire(sc) != 0)
		return -1;

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);
	sc->phy.release(sc);

	return 0;
}

static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

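	/*
	 * Briefly force the MAC speed with SPD_BYPS set, then restore
	 * the saved CTRL/CTRL_EXT values.  The reference Intel drivers
	 * perform the same dance, presumably so that the new K1 setting
	 * latches in the Kumeran interface.
	 */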
	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

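	/* Propagate the NVM's MDIO routing (external/common) to MDICNFG. */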
	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

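/* All-0s or all-1s PHY IDs mean the MDIO read failed or no PHY answered. */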
#define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))

static bool
wm_phy_is_accessible_pchlan(struct wm_softc *sc)
{
	int i;
	uint32_t reg;
	uint16_t id1, id2;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));
	id1 = id2 = 0xffff;
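	/* The first access after reset may fail, so try the read twice. */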
	for (i = 0; i < 2; i++) {
		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
		if (MII_INVALIDID(id1))
			continue;
		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
		if (MII_INVALIDID(id2))
			continue;
		break;
	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;

	if (sc->sc_type < WM_T_PCH_LPT) {
		sc->phy.release(sc);
		wm_set_mdio_slow_mode_hv(sc);
		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
		sc->phy.acquire(sc);
	}
	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
		printf("%s: XXX return with false\n",
		    device_xname(sc->sc_dev));
		return false;
	}
out:
	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
		/* Only unforce SMBus if ME is not active */
		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
			/* Unforce SMBus mode in PHY */
			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL);
			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
			    CV_SMB_CTRL, reg);

			/* Unforce SMBus mode in MAC */
			reg = CSR_READ(sc, WMREG_CTRL_EXT);
			reg &= ~CTRL_EXT_FORCE_SMBUS;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
	}
	return true;
}

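/*
 * Toggle the LANPHYPC pin (drive the value low under software override)
 * to power-cycle the PHY connection and bring it back to a known state.
 * The required settle time differs: pre-LPT MACs use a fixed delay,
 * LPT and later poll CTRL_EXT_LPCD.
 */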
static void
wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
{
	uint32_t reg;
	int i;

	/* Set PHY Config Counter to 50msec */
	reg = CSR_READ(sc, WMREG_FEXTNVM3);
	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);

	/* Toggle LANPHYPC */
	reg = CSR_READ(sc, WMREG_CTRL);
	reg |= CTRL_LANPHYPC_OVERRIDE;
	reg &= ~CTRL_LANPHYPC_VALUE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);
	delay(1000);
	reg &= ~CTRL_LANPHYPC_OVERRIDE;
	CSR_WRITE(sc, WMREG_CTRL, reg);
	CSR_WRITE_FLUSH(sc);

	if (sc->sc_type < WM_T_PCH_LPT)
		delay(50 * 1000);
	else {
		i = 20;

		do {
			delay(5 * 1000);
		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
		    && i--);

		delay(30 * 1000);
	}
}

static int
wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
{
	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
	uint32_t rxa;
	uint16_t scale = 0, lat_enc = 0;
	int64_t lat_ns, value;

	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
		device_xname(sc->sc_dev), __func__));

	if (link) {
		pcireg_t preg;
		uint16_t max_snoop, max_nosnoop, max_ltr_enc;

		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;

		/*
		 * Determine the maximum latency tolerated by the device.
		 *
		 * Per the PCIe spec, the tolerated latencies are encoded as
		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
		 * a 10-bit value (0-1023) to provide a range from 1 ns to
		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
		 */
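		/*
		 * The latency computed below is the time to drain the Rx
		 * buffer (rxa in KB, minus two in-flight frames) at line
		 * rate: bytes * 8 bits * 1000 / speed-in-Mb/s gives ns.
		 *
		 * Worked example (illustrative numbers only): for
		 * lat_ns = 3000000 the loop below ends with scale 3 and
		 * value = howmany(3000000, 2^15) = 92, so the encoded
		 * latency is 92 * 2^15 ns, roughly 3.01 ms.
		 */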
		lat_ns = ((int64_t)rxa * 1024 -
		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
		if (lat_ns < 0)
			lat_ns = 0;
		else {
			uint32_t status;
			uint16_t speed;

			status = CSR_READ(sc, WMREG_STATUS);
			switch (__SHIFTOUT(status, STATUS_SPEED)) {
			case STATUS_SPEED_10:
				speed = 10;
				break;
			case STATUS_SPEED_100:
				speed = 100;
				break;
			case STATUS_SPEED_1000:
				speed = 1000;
				break;
			default:
				printf("%s: Unknown speed (status = %08x)\n",
				    device_xname(sc->sc_dev), status);
				return -1;
			}
			lat_ns /= speed;
		}
		value = lat_ns;

		while (value > LTRV_VALUE) {
			scale++;
			value = howmany(value, __BIT(5));
		}
		if (scale > LTRV_SCALE_MAX) {
			printf("%s: Invalid LTR latency scale %d\n",
			    device_xname(sc->sc_dev), scale);
			return -1;
		}
		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);

		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    WM_PCI_LTR_CAP_LPT);
		max_snoop = preg & 0xffff;
		max_nosnoop = preg >> 16;

		max_ltr_enc = MAX(max_snoop, max_nosnoop);

		if (lat_enc > max_ltr_enc)
			lat_enc = max_ltr_enc;
	}
	/* Snoop and No-Snoop latencies the same */
	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
	CSR_WRITE(sc, WMREG_LTRV, reg);

	return 0;
}

/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
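/*
 * If the internal PHY comes up with its PLL unconfigured
 * (GS40G_PHY_PLL_UNCONF), reset the PHY, bounce the device through
 * D3hot and back to D0 with a patched iNVM autoload word in place,
 * and retry until the PLL reads back as configured.
 */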
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	int pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break; /* OK */

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}